// Copyright 2015-2016 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

import "grpc/testing/payloads.proto";
import "grpc/testing/stats.proto";

package grpc.testing;

option java_package = "io.grpc.benchmarks.proto";
option java_outer_classname = "Control";

enum ClientType {
  // Many languages support a basic distinction between a synchronous and an
  // asynchronous client; this enum lets the scenario specify which to use.
  SYNC_CLIENT = 0;
  ASYNC_CLIENT = 1;
  OTHER_CLIENT = 2; // used for some language-specific variants
}

enum ServerType {
  SYNC_SERVER = 0;
  ASYNC_SERVER = 1;
  ASYNC_GENERIC_SERVER = 2;
  OTHER_SERVER = 3; // used for some language-specific variants
}

enum RpcType {
  UNARY = 0;
  STREAMING = 1;
  STREAMING_FROM_CLIENT = 2;
  STREAMING_FROM_SERVER = 3;
  STREAMING_BOTH_WAYS = 4;
}

// Parameters of a Poisson arrival process, which is a good representation
// of activity coming in from independent, identical, stationary sources.
message PoissonParams {
  // The rate of arrivals (a.k.a. the lambda parameter of the exponential
  // distribution of inter-arrival times).
  double offered_load = 1;
}
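
// For reference: with a Poisson arrival process of rate offered_load, the gaps
// between successive RPC starts are exponentially distributed with mean
// 1 / offered_load. For example, offered_load = 1000 corresponds to a mean gap
// of 1 ms and an expected aggregate start rate of about 1000 RPCs per second,
// regardless of how long individual RPCs take to complete.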

// Once an RPC finishes, immediately start a new one.
// No configuration parameters needed.
message ClosedLoopParams {}

message LoadParams {
  oneof load {
    ClosedLoopParams closed_loop = 1;
    PoissonParams poisson = 2;
  }
}
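
// As an illustration (a text-format sketch, not taken from any particular
// scenario): a closed-loop client would set
//   load_params { closed_loop {} }
// while a client offering a fixed 200 RPCs/s Poisson load would set
//   load_params { poisson { offered_load: 200 } }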

// Presence of SecurityParams implies use of TLS.
message SecurityParams {
  // If true, use gRPC's bundled test CA when validating the server certificate.
  bool use_test_ca = 1;
  // Host name to use for TLS host-name checking instead of the target name.
  string server_host_override = 2;
}

message ChannelArg {
  string name = 1;
  oneof value {
    string str_value = 2;
    int32 int_value = 3;
  }
}
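
// For example (illustrative only; whether a particular argument is honored
// depends on the client implementation under test), a scenario might pass a
// channel argument such as:
//   channel_args { name: "grpc.optimization_target" str_value: "throughput" }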

message ClientConfig {
  // List of targets to connect to. At least one target needs to be specified.
  repeated string server_targets = 1;
  ClientType client_type = 2;
  SecurityParams security_params = 3;
  // How many concurrent RPCs to start for each channel.
  // For a synchronous client, a separate thread is used for each outstanding RPC.
  int32 outstanding_rpcs_per_channel = 4;
  // Number of independent client channels to create.
  // The i-th channel will connect to server_targets[i % server_targets.size()].
  int32 client_channels = 5;
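  // (Example for client_channels above: with server_targets = ["hostA:10000",
  // "hostB:10000"] and client_channels = 3, channels 0, 1 and 2 connect to
  // hostA, hostB and hostA respectively. The host names are illustrative.)
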
  // Only for the async client. Number of threads to use to start/manage RPCs.
  int32 async_client_threads = 7;
  RpcType rpc_type = 8;
  // The requested load for the entire client (aggregated over all the threads).
  LoadParams load_params = 10;
  PayloadConfig payload_config = 11;
  HistogramParams histogram_params = 12;

  // Specify the cores we should run the client on, if desired.
  repeated int32 core_list = 13;
  int32 core_limit = 14;

  // If we use an OTHER_CLIENT client_type, this string gives more detail.
  string other_client_api = 15;

  repeated ChannelArg channel_args = 16;

  // Number of messages on a stream before it gets finished/restarted.
  int32 messages_per_stream = 18;
}

message ClientStatus { ClientStats stats = 1; }

// Request current stats.
message Mark {
  // If true, the stats will be reset after taking their snapshot.
  bool reset = 1;
}

message ClientArgs {
  oneof argtype {
    ClientConfig setup = 1;
    Mark mark = 2;
  }
}
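
// A sketch of the typical flow (based on how the benchmark workers are
// generally driven; not a normative part of this schema): the driver first
// sends a ClientArgs carrying a ClientConfig as 'setup', then periodically
// sends a ClientArgs carrying a Mark to snapshot (and optionally reset) the
// stats, receiving a ClientStatus in response to each message.
// ServerArgs/ServerStatus below follow the same pattern.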

message ServerConfig {
  ServerType server_type = 1;
  SecurityParams security_params = 2;
  // Port on which to listen. Zero means pick an unused port; the port actually
  // bound is reported back in ServerStatus.port.
  int32 port = 4;
  // Only for the async server. Number of threads used to serve the requests.
  int32 async_server_threads = 7;
  // Specify the number of cores to limit the server to, if desired.
  int32 core_limit = 8;
  // Payload config, used in the generic server.
  // Note this must NOT be used in proto (non-generic) servers. For proto servers,
  // 'response sizes' must be configured from the 'response_size' field of the
  // 'SimpleRequest' objects in RPC requests.
  PayloadConfig payload_config = 9;

  // Specify the cores we should run the server on, if desired.
  repeated int32 core_list = 10;

  // If we use an OTHER_SERVER server_type, this string gives more detail.
  string other_server_api = 11;

  // C++-only options (for now) --------------------------------

  // Buffer pool size (no buffer pool specified if unset).
  int32 resource_quota_size = 1001;
}

message ServerArgs {
  oneof argtype {
    ServerConfig setup = 1;
    Mark mark = 2;
  }
}

message ServerStatus {
  ServerStats stats = 1;
  // The port bound by the server.
  int32 port = 2;
  // Number of cores available to the server.
  int32 cores = 3;
}

message CoreRequest {
}

message CoreResponse {
  // Number of cores available on the server.
  int32 cores = 1;
}

message Void {
}

// A single performance scenario: input to qps_json_driver.
message Scenario {
  // Human-readable name for this scenario.
  string name = 1;
  // Client configuration.
  ClientConfig client_config = 2;
  // Number of clients to start for the test.
  int32 num_clients = 3;
  // Server configuration.
  ServerConfig server_config = 4;
  // Number of servers to start for the test.
  int32 num_servers = 5;
  // Warmup period, in seconds.
  int32 warmup_seconds = 6;
  // Benchmark time, in seconds.
  int32 benchmark_seconds = 7;
  // Number of workers to spawn locally (usually zero).
  int32 spawn_local_worker_count = 8;
}

// A set of scenarios to be run with qps_json_driver.
message Scenarios {
  repeated Scenario scenarios = 1;
}
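
// As an illustration only (field names follow the messages above, but the
// values are hypothetical and not taken from a real scenario file), the JSON
// consumed by qps_json_driver has roughly this shape:
//
//   {
//     "scenarios": [{
//       "name": "example_sync_unary",
//       "num_clients": 1,
//       "num_servers": 1,
//       "warmup_seconds": 5,
//       "benchmark_seconds": 30,
//       "client_config": { "client_type": "SYNC_CLIENT", "rpc_type": "UNARY", ... },
//       "server_config": { "server_type": "SYNC_SERVER" }
//     }]
//   }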

// Basic summary that can be computed from ClientStats and ServerStats
// once the scenario has finished.
message ScenarioResultSummary {
  // Total number of operations per second over all clients.
  double qps = 1;
  // QPS per server core.
  double qps_per_server_core = 2;
  // Server load based on system_time (0.85 => 85%).
  double server_system_time = 3;
  // Server load based on user_time (0.85 => 85%).
  double server_user_time = 4;
  // Client load based on system_time (0.85 => 85%).
  double client_system_time = 5;
  // Client load based on user_time (0.85 => 85%).
  double client_user_time = 6;

  // Latency percentiles, in nanoseconds (latency_999 is the 99.9th percentile).
  double latency_50 = 7;
  double latency_90 = 8;
  double latency_95 = 9;
  double latency_99 = 10;
  double latency_999 = 11;

  // Server CPU usage percentage.
  double server_cpu_usage = 12;

  // Number of requests per second that succeeded/failed.
  double successful_requests_per_second = 13;
  double failed_requests_per_second = 14;

  // Number of polls called inside completion queue per request.
  double client_polls_per_request = 15;
  double server_polls_per_request = 16;
}

// Results of a single benchmark scenario.
message ScenarioResult {
  // Inputs used to run the scenario.
  Scenario scenario = 1;
  // Histograms from all clients merged into one histogram.
  HistogramData latencies = 2;
  // Client stats for each client.
  repeated ClientStats client_stats = 3;
  // Server stats for each server.
  repeated ServerStats server_stats = 4;
  // Number of cores available to each server.
  repeated int32 server_cores = 5;
  // An after-the-fact computed summary.
  ScenarioResultSummary summary = 6;
  // Information on success or failure of each worker.
  repeated bool client_success = 7;
  repeated bool server_success = 8;
  // Number of failed requests (one row per status code seen).
  repeated RequestResultCount request_results = 9;
}