Merge pull request #82 from tbetbetbe/grpc_ruby_add_rubocop_fix_lint_style_issues

Grpc ruby add rubocop fix lint style issues
pull/89/head
Michael Lumish 10 years ago
commit 861c79b08a
  1. 10
      src/ruby/.rubocop.yml
  2. 52
      src/ruby/.rubocop_todo.yml
  3. 32
      src/ruby/Rakefile
  4. 57
      src/ruby/bin/interop/interop_client.rb
  5. 35
      src/ruby/bin/interop/interop_server.rb
  6. 25
      src/ruby/bin/math_client.rb
  7. 44
      src/ruby/bin/math_server.rb
  8. 12
      src/ruby/bin/noproto_client.rb
  9. 16
      src/ruby/bin/noproto_server.rb
  10. 26
      src/ruby/ext/grpc/extconf.rb
  11. 29
      src/ruby/grpc.gemspec
  12. 19
      src/ruby/lib/grpc/beefcake.rb
  13. 7
      src/ruby/lib/grpc/core/event.rb
  14. 18
      src/ruby/lib/grpc/core/time_consts.rb
  15. 9
      src/ruby/lib/grpc/errors.rb
  16. 919
      src/ruby/lib/grpc/generic/active_call.rb
  17. 337
      src/ruby/lib/grpc/generic/bidi_call.rb
  18. 706
      src/ruby/lib/grpc/generic/client_stub.rb
  19. 131
      src/ruby/lib/grpc/generic/rpc_desc.rb
  20. 648
      src/ruby/lib/grpc/generic/rpc_server.rb
  21. 326
      src/ruby/lib/grpc/generic/service.rb
  22. 1
      src/ruby/lib/grpc/version.rb
  23. 2
      src/ruby/spec/alloc_spec.rb
  24. 4
      src/ruby/spec/byte_buffer_spec.rb
  25. 60
      src/ruby/spec/call_spec.rb
  26. 37
      src/ruby/spec/channel_spec.rb
  27. 82
      src/ruby/spec/client_server_spec.rb
  28. 5
      src/ruby/spec/completion_queue_spec.rb
  29. 14
      src/ruby/spec/credentials_spec.rb
  30. 22
      src/ruby/spec/event_spec.rb
  31. 48
      src/ruby/spec/generic/active_call_spec.rb
  32. 143
      src/ruby/spec/generic/client_stub_spec.rb
  33. 117
      src/ruby/spec/generic/rpc_desc_spec.rb
  34. 35
      src/ruby/spec/generic/rpc_server_pool_spec.rb
  35. 139
      src/ruby/spec/generic/rpc_server_spec.rb
  36. 58
      src/ruby/spec/generic/service_spec.rb
  37. 2
      src/ruby/spec/metadata_spec.rb
  38. 4
      src/ruby/spec/port_picker.rb
  39. 13
      src/ruby/spec/server_credentials_spec.rb
  40. 50
      src/ruby/spec/server_spec.rb
  41. 4
      src/ruby/spec/time_consts_spec.rb

@ -0,0 +1,10 @@
# This is the configuration used to check the rubocop source code.
inherit_from: .rubocop_todo.yml
AllCops:
Exclude:
- 'bin/apis/**/*'
- 'bin/interop/test/**/*'
- 'bin/math.rb'
- 'bin/math_services.rb'

@ -0,0 +1,52 @@
# This configuration was generated by `rubocop --auto-gen-config`
# on 2015-01-16 02:30:04 -0800 using RuboCop version 0.28.0.
# The point is for the user to remove these configuration records
# one by one as the offenses are removed from the code base.
# Note that changes in the inspected code, or installation of new
# versions of RuboCop, may require this file to be generated again.
# Offense count: 3
# Lint/UselessAssignment:
# Enabled: false
# Offense count: 33
Metrics/AbcSize:
Max: 39
# Offense count: 3
# Configuration parameters: CountComments.
Metrics/ClassLength:
Max: 231
# Offense count: 2
Metrics/CyclomaticComplexity:
Max: 8
# Offense count: 36
# Configuration parameters: CountComments.
Metrics/MethodLength:
Max: 37
# Offense count: 8
# Configuration parameters: CountKeywordArgs.
Metrics/ParameterLists:
Max: 8
# Offense count: 2
Metrics/PerceivedComplexity:
Max: 10
# Offense count: 7
# Configuration parameters: AllowedVariables.
Style/GlobalVars:
Enabled: false
# Offense count: 1
# Configuration parameters: EnforcedStyle, MinBodyLength, SupportedStyles.
Style/Next:
Enabled: false
# Offense count: 2
# Configuration parameters: Methods.
Style/SingleLineBlockParams:
Enabled: false

@ -1,46 +1,44 @@
# -*- ruby -*-
require 'rake/extensiontask'
require 'rspec/core/rake_task'
require 'rubocop/rake_task'
desc 'Run Rubocop to check for style violations'
RuboCop::RakeTask.new
Rake::ExtensionTask.new 'grpc' do |ext|
ext.lib_dir = File.join('lib', 'grpc')
end
SPEC_SUITES = [
{ :id => :wrapper, :title => 'wrapper layer', :files => %w(spec/*.rb) },
{ :id => :idiomatic, :title => 'idiomatic layer', :dir => %w(spec/generic),
:tag => '~bidi' },
{ :id => :bidi, :title => 'bidi tests', :dir => %w(spec/generic),
:tag => 'bidi' }
{ id: :wrapper, title: 'wrapper layer', files: %w(spec/*.rb) },
{ id: :idiomatic, title: 'idiomatic layer', dir: %w(spec/generic),
tag: '~bidi' },
{ id: :bidi, title: 'bidi tests', dir: %w(spec/generic),
tag: 'bidi' }
]
desc "Run all RSpec tests"
desc 'Run all RSpec tests'
namespace :spec do
namespace :suite do
SPEC_SUITES.each do |suite|
desc "Run all specs in #{suite[:title]} spec suite"
RSpec::Core::RakeTask.new(suite[:id]) do |t|
spec_files = []
if suite[:files]
suite[:files].each { |f| spec_files += Dir[f] }
end
suite[:files].each { |f| spec_files += Dir[f] } if suite[:files]
if suite[:dirs]
suite[:dirs].each { |f| spec_files += Dir["#{f}/**/*_spec.rb"] }
end
t.pattern = spec_files
if suite[:tag]
t.rspec_opts = "--tag #{suite[:tag]}"
end
t.rspec_opts = "--tag #{suite[:tag]}" if suite[:tag]
end
end
end
end
task :default => "spec:suite:idiomatic" # this should be spec:suite:bidi
task "spec:suite:wrapper" => :compile
task "spec:suite:idiomatic" => "spec:suite:wrapper"
task "spec:suite:bidi" => "spec:suite:idiomatic"
task default: 'spec:suite:idiomatic' # this should be spec:suite:bidi
task 'spec:suite:wrapper' => :compile
task 'spec:suite:idiomatic' => 'spec:suite:wrapper'
task 'spec:suite:bidi' => 'spec:suite:idiomatic'

@ -65,7 +65,7 @@ end
# creates a Credentials from the test certificates.
def test_creds
certs = load_test_certs
creds = GRPC::Core::Credentials.new(certs[0])
GRPC::Core::Credentials.new(certs[0])
end
# creates a test stub that accesses host:port securely.
@ -73,15 +73,15 @@ def create_stub(host, port)
address = "#{host}:#{port}"
stub_opts = {
:creds => test_creds,
GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.com',
GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.com'
}
logger.info("... connecting securely to #{address}")
stub = Grpc::Testing::TestService::Stub.new(address, **stub_opts)
Grpc::Testing::TestService::Stub.new(address, **stub_opts)
end
# produces a string of null chars (\0) of length l.
def nulls(l)
raise 'requires #{l} to be +ve' if l < 0
fail 'requires #{l} to be +ve' if l < 0
[].pack('x' * l).force_encoding('utf-8')
end
@ -102,13 +102,13 @@ class PingPongPlayer
def each_item
return enum_for(:each_item) unless block_given?
req_cls, p_cls= StreamingOutputCallRequest, ResponseParameters # short
req_cls, p_cls = StreamingOutputCallRequest, ResponseParameters # short
count = 0
@msg_sizes.each do |m|
req_size, resp_size = m
req = req_cls.new(:payload => Payload.new(:body => nulls(req_size)),
:response_type => COMPRESSABLE,
:response_parameters => [p_cls.new(:size => resp_size)])
req = req_cls.new(payload: Payload.new(body: nulls(req_size)),
response_type: COMPRESSABLE,
response_parameters: [p_cls.new(size: resp_size)])
yield req
resp = @queue.pop
assert_equal(PayloadType.lookup(COMPRESSABLE), resp.payload.type,
@ -148,11 +148,11 @@ class NamedTests
# ruby server
# FAILED
def large_unary
req_size, wanted_response_size = 271828, 314159
payload = Payload.new(:type => COMPRESSABLE, :body => nulls(req_size))
req = SimpleRequest.new(:response_type => COMPRESSABLE,
:response_size => wanted_response_size,
:payload => payload)
req_size, wanted_response_size = 271_828, 314_159
payload = Payload.new(type: COMPRESSABLE, body: nulls(req_size))
req = SimpleRequest.new(response_type: COMPRESSABLE,
response_size: wanted_response_size,
payload: payload)
resp = @stub.unary_call(req)
assert_equal(wanted_response_size, resp.payload.body.length,
'large_unary: payload had the wrong length')
@ -166,27 +166,27 @@ class NamedTests
# ruby server
# FAILED
def client_streaming
msg_sizes = [27182, 8, 1828, 45904]
wanted_aggregate_size = 74922
msg_sizes = [27_182, 8, 1828, 45_904]
wanted_aggregate_size = 74_922
reqs = msg_sizes.map do |x|
req = Payload.new(:body => nulls(x))
StreamingInputCallRequest.new(:payload => req)
req = Payload.new(body: nulls(x))
StreamingInputCallRequest.new(payload: req)
end
resp = @stub.streaming_input_call(reqs)
assert_equal(wanted_aggregate_size, resp.aggregated_payload_size,
'client_streaming: aggregate payload size is incorrect')
p 'OK: client_streaming'
end
end
# TESTING:
# PASSED
# ruby server
# FAILED
def server_streaming
msg_sizes = [31415, 9, 2653, 58979]
response_spec = msg_sizes.map { |s| ResponseParameters.new(:size => s) }
req = StreamingOutputCallRequest.new(:response_type => COMPRESSABLE,
:response_parameters => response_spec)
msg_sizes = [31_415, 9, 2653, 58_979]
response_spec = msg_sizes.map { |s| ResponseParameters.new(size: s) }
req = StreamingOutputCallRequest.new(response_type: COMPRESSABLE,
response_parameters: response_spec)
resps = @stub.streaming_output_call(req)
resps.each_with_index do |r, i|
assert i < msg_sizes.length, 'too many responses'
@ -203,13 +203,12 @@ class NamedTests
# ruby server
# FAILED
def ping_pong
msg_sizes = [[27182, 31415], [8, 9], [1828, 2653], [45904, 58979]]
msg_sizes = [[27_182, 31_415], [8, 9], [1828, 2653], [45_904, 58_979]]
ppp = PingPongPlayer.new(msg_sizes)
resps = @stub.full_duplex_call(ppp.each_item)
resps.each { |r| ppp.queue.push(r) }
p 'OK: ping_pong'
end
end
# validates the command line options, returning them as a Hash.
@ -217,7 +216,7 @@ def parse_options
options = {
'server_host' => nil,
'server_port' => nil,
'test_case' => nil,
'test_case' => nil
}
OptionParser.new do |opts|
opts.banner = 'Usage: --server_host <server_host> --server_port server_port'
@ -228,17 +227,17 @@ def parse_options
options['server_port'] = v
end
# instance_methods(false) gives only the methods defined in that class
test_cases = NamedTests.instance_methods(false).map { |t| t.to_s }
test_cases = NamedTests.instance_methods(false).map(&:to_s)
test_case_list = test_cases.join(',')
opts.on("--test_case CODE", test_cases, {}, "select a test_case",
opts.on('--test_case CODE', test_cases, {}, 'select a test_case',
" (#{test_case_list})") do |v|
options['test_case'] = v
end
end.parse!
['server_host', 'server_port', 'test_case'].each do |arg|
%w(server_host, server_port, test_case).each do |arg|
if options[arg].nil?
raise OptionParser::MissingArgument.new("please specify --#{arg}")
fail(OptionParser::MissingArgument, "please specify --#{arg}")
end
end
options

@ -62,12 +62,12 @@ end
# creates a ServerCredentials from the test certificates.
def test_server_creds
certs = load_test_certs
server_creds = GRPC::Core::ServerCredentials.new(nil, certs[1], certs[2])
GRPC::Core::ServerCredentials.new(nil, certs[1], certs[2])
end
# produces a string of null chars (\0) of length l.
def nulls(l)
raise 'requires #{l} to be +ve' if l < 0
fail 'requires #{l} to be +ve' if l < 0
[].pack('x' * l).force_encoding('utf-8')
end
@ -86,7 +86,7 @@ class EnumeratorQueue
loop do
r = @q.pop
break if r.equal?(@sentinel)
raise r if r.is_a?Exception
fail r if r.is_a? Exception
yield r
end
end
@ -98,27 +98,27 @@ class TestTarget < Grpc::Testing::TestService::Service
include Grpc::Testing
include Grpc::Testing::PayloadType
def empty_call(empty, call)
def empty_call(_empty, _call)
Empty.new
end
def unary_call(simple_req, call)
def unary_call(simple_req, _call)
req_size = simple_req.response_size
SimpleResponse.new(:payload => Payload.new(:type => COMPRESSABLE,
:body => nulls(req_size)))
SimpleResponse.new(payload: Payload.new(type: COMPRESSABLE,
body: nulls(req_size)))
end
def streaming_input_call(call)
sizes = call.each_remote_read.map { |x| x.payload.body.length }
sum = sizes.inject { |sum,x| sum + x }
StreamingInputCallResponse.new(:aggregated_payload_size => sum)
sum = sizes.inject { |s, x| s + x }
StreamingInputCallResponse.new(aggregated_payload_size: sum)
end
def streaming_output_call(req, call)
def streaming_output_call(req, _call)
cls = StreamingOutputCallResponse
req.response_parameters.map do |p|
cls.new(:payload => Payload.new(:type => req.response_type,
:body => nulls(p.size)))
cls.new(payload: Payload.new(type: req.response_type,
body: nulls(p.size)))
end
end
@ -126,13 +126,13 @@ class TestTarget < Grpc::Testing::TestService::Service
# reqs is a lazy Enumerator of the requests sent by the client.
q = EnumeratorQueue.new(self)
cls = StreamingOutputCallResponse
t = Thread.new do
Thread.new do
begin
reqs.each do |req|
logger.info("read #{req.inspect}")
resp_size = req.response_parameters[0].size
resp = cls.new(:payload => Payload.new(:type => req.response_type,
:body => nulls(resp_size)))
resp = cls.new(payload: Payload.new(type: req.response_type,
body: nulls(resp_size)))
q.push(resp)
end
logger.info('finished reads')
@ -149,13 +149,12 @@ class TestTarget < Grpc::Testing::TestService::Service
# currently used in any tests
full_duplex_call(reqs)
end
end
# validates the command line options, returning them as a Hash.
def parse_options
options = {
'port' => nil,
'port' => nil
}
OptionParser.new do |opts|
opts.banner = 'Usage: --port port'
@ -165,7 +164,7 @@ def parse_options
end.parse!
if options['port'].nil?
raise OptionParser::MissingArgument.new("please specify --port")
fail(OptionParser::MissingArgument, 'please specify --port')
end
options
end

@ -29,7 +29,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Sample app that accesses a Calc service running on a Ruby gRPC server and
# helps validate RpcServer as a gRPC server using proto2 serialization.
#
@ -49,9 +48,9 @@ include GRPC::Core::TimeConsts
def do_div(stub)
logger.info('request_response')
logger.info('----------------')
req = Math::DivArgs.new(:dividend => 7, :divisor => 3)
req = Math::DivArgs.new(dividend: 7, divisor: 3)
logger.info("div(7/3): req=#{req.inspect}")
resp = stub.div(req, deadline=INFINITE_FUTURE)
resp = stub.div(req, INFINITE_FUTURE)
logger.info("Answer: #{resp.inspect}")
logger.info('----------------')
end
@ -60,7 +59,7 @@ def do_sum(stub)
# to make client streaming requests, pass an enumerable of the inputs
logger.info('client_streamer')
logger.info('---------------')
reqs = [1, 2, 3, 4, 5].map { |x| Math::Num.new(:num => x) }
reqs = [1, 2, 3, 4, 5].map { |x| Math::Num.new(num: x) }
logger.info("sum(1, 2, 3, 4, 5): reqs=#{reqs.inspect}")
resp = stub.sum(reqs) # reqs.is_a?(Enumerable)
logger.info("Answer: #{resp.inspect}")
@ -70,9 +69,9 @@ end
def do_fib(stub)
logger.info('server_streamer')
logger.info('----------------')
req = Math::FibArgs.new(:limit => 11)
req = Math::FibArgs.new(limit: 11)
logger.info("fib(11): req=#{req.inspect}")
resp = stub.fib(req, deadline=INFINITE_FUTURE)
resp = stub.fib(req, INFINITE_FUTURE)
resp.each do |r|
logger.info("Answer: #{r.inspect}")
end
@ -83,11 +82,11 @@ def do_div_many(stub)
logger.info('bidi_streamer')
logger.info('-------------')
reqs = []
reqs << Math::DivArgs.new(:dividend => 7, :divisor => 3)
reqs << Math::DivArgs.new(:dividend => 5, :divisor => 2)
reqs << Math::DivArgs.new(:dividend => 7, :divisor => 2)
reqs << Math::DivArgs.new(dividend: 7, divisor: 3)
reqs << Math::DivArgs.new(dividend: 5, divisor: 2)
reqs << Math::DivArgs.new(dividend: 7, divisor: 2)
logger.info("div(7/3), div(5/2), div(7/2): reqs=#{reqs.inspect}")
resp = stub.div_many(reqs, deadline=10)
resp = stub.div_many(reqs, 10)
resp.each do |r|
logger.info("Answer: #{r.inspect}")
end
@ -103,7 +102,7 @@ end
def test_creds
certs = load_test_certs
creds = GRPC::Core::Credentials.new(certs[0])
GRPC::Core::Credentials.new(certs[0])
end
def main
@ -117,7 +116,7 @@ def main
options['host'] = v
end
opts.on('-s', '--secure', 'access using test creds') do |v|
options['secure'] = true
options['secure'] = v
end
end.parse!
@ -128,7 +127,7 @@ def main
if options['secure']
stub_opts = {
:creds => test_creds,
GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.com',
GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.com'
}
p stub_opts
p options['host']

@ -46,9 +46,8 @@ require 'optparse'
# Holds state for a fibonacci series
class Fibber
def initialize(limit)
raise "bad limit: got #{limit}, want limit > 0" if limit < 1
fail "bad limit: got #{limit}, want limit > 0" if limit < 1
@limit = limit
end
@ -57,14 +56,14 @@ class Fibber
idx, current, previous = 0, 1, 1
until idx == @limit
if idx == 0 || idx == 1
yield Math::Num.new(:num => 1)
yield Math::Num.new(num: 1)
idx += 1
next
end
tmp = current
current = previous + current
previous = tmp
yield Math::Num.new(:num => current)
yield Math::Num.new(num: current)
idx += 1
end
end
@ -85,43 +84,41 @@ class EnumeratorQueue
loop do
r = @q.pop
break if r.equal?(@sentinel)
raise r if r.is_a?Exception
fail r if r.is_a? Exception
yield r
end
end
end
# The Math::Math:: module occurs because the service has the same name as its
# package. That practice should be avoided by defining real services.
class Calculator < Math::Math::Service
def div(div_args, call)
def div(div_args, _call)
if div_args.divisor == 0
# To send non-OK status handlers raise a StatusError with the code and
# and detail they want sent as a Status.
raise GRPC::StatusError.new(GRPC::Status::INVALID_ARGUMENT,
'divisor cannot be 0')
fail GRPC::StatusError.new(GRPC::Status::INVALID_ARGUMENT,
'divisor cannot be 0')
end
Math::DivReply.new(:quotient => div_args.dividend/div_args.divisor,
:remainder => div_args.dividend % div_args.divisor)
Math::DivReply.new(quotient: div_args.dividend / div_args.divisor,
remainder: div_args.dividend % div_args.divisor)
end
def sum(call)
# the requests are accessible as the Enumerator call#each_request
nums = call.each_remote_read.collect { |x| x.num }
sum = nums.inject { |sum,x| sum + x }
Math::Num.new(:num => sum)
nums = call.each_remote_read.collect(&:num)
sum = nums.inject { |s, x| s + x }
Math::Num.new(num: sum)
end
def fib(fib_args, call)
def fib(fib_args, _call)
if fib_args.limit < 1
raise StatusError.new(Status::INVALID_ARGUMENT, 'limit must be >= 0')
fail StatusError.new(Status::INVALID_ARGUMENT, 'limit must be >= 0')
end
# return an Enumerator of Nums
Fibber.new(fib_args.limit).generator()
Fibber.new(fib_args.limit).generator
# just return the generator, GRPC::GenericServer sends each actual response
end
@ -132,10 +129,10 @@ class Calculator < Math::Math::Service
begin
requests.each do |req|
logger.info("read #{req.inspect}")
resp = Math::DivReply.new(:quotient => req.dividend/req.divisor,
:remainder => req.dividend % req.divisor)
resp = Math::DivReply.new(quotient: req.dividend / req.divisor,
remainder: req.dividend % req.divisor)
q.push(resp)
Thread::pass # let the internal Bidi threads run
Thread.pass # let the internal Bidi threads run
end
logger.info('finished reads')
q.push(self)
@ -147,7 +144,6 @@ class Calculator < Math::Math::Service
t.priority = -2 # hint that the div_many thread should not be favoured
q.each_item
end
end
def load_test_certs
@ -159,7 +155,7 @@ end
def test_server_creds
certs = load_test_certs
server_creds = GRPC::Core::ServerCredentials.new(nil, certs[1], certs[2])
GRPC::Core::ServerCredentials.new(nil, certs[1], certs[2])
end
def main
@ -173,7 +169,7 @@ def main
options['host'] = v
end
opts.on('-s', '--secure', 'access using test creds') do |v|
options['secure'] = true
options['secure'] = v
end
end.parse!

@ -40,16 +40,18 @@ $LOAD_PATH.unshift(lib_dir) unless $LOAD_PATH.include?(lib_dir)
require 'grpc'
require 'optparse'
# a simple non-protobuf message class.
class NoProtoMsg
def self.marshal(o)
def self.marshal(_o)
''
end
def self.unmarshal(o)
def self.unmarshal(_o)
NoProtoMsg.new
end
end
# service that uses the non-protobuf message class.
class NoProtoService
include GRPC::GenericService
rpc :AnRPC, NoProtoMsg, NoProtoMsg
@ -66,7 +68,7 @@ end
def test_creds
certs = load_test_certs
creds = GRPC::Core::Credentials.new(certs[0])
GRPC::Core::Credentials.new(certs[0])
end
def main
@ -80,14 +82,14 @@ def main
options['host'] = v
end
opts.on('-s', '--secure', 'access using test creds') do |v|
options['secure'] = true
options['secure'] = v
end
end.parse!
if options['secure']
stub_opts = {
:creds => test_creds,
GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.com',
GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.com'
}
p stub_opts
p options['host']

@ -40,26 +40,29 @@ $LOAD_PATH.unshift(lib_dir) unless $LOAD_PATH.include?(lib_dir)
require 'grpc'
require 'optparse'
# a simple non-protobuf message class.
class NoProtoMsg
def self.marshal(o)
def self.marshal(_o)
''
end
def self.unmarshal(o)
def self.unmarshal(_o)
NoProtoMsg.new
end
end
# service that uses the non-protobuf message class.
class NoProtoService
include GRPC::GenericService
rpc :AnRPC, NoProtoMsg, NoProtoMsg
end
# an implementation of the non-protobuf service.
class NoProto < NoProtoService
def initialize(default_var='ignored')
def initialize(_default_var = 'ignored')
end
def an_rpc(req, call)
def an_rpc(req, _call)
logger.info('echo service received a request')
req
end
@ -74,7 +77,7 @@ end
def test_server_creds
certs = load_test_certs
server_creds = GRPC::Core::ServerCredentials.new(nil, certs[1], certs[2])
GRPC::Core::ServerCredentials.new(nil, certs[1], certs[2])
end
def main
@ -88,7 +91,7 @@ def main
options['host'] = v
end
opts.on('-s', '--secure', 'access using test creds') do |v|
options['secure'] = true
options['secure'] = v
end
end.parse!
@ -106,5 +109,4 @@ def main
s.run
end
main

@ -33,29 +33,29 @@ LIBDIR = RbConfig::CONFIG['libdir']
INCLUDEDIR = RbConfig::CONFIG['includedir']
HEADER_DIRS = [
# Search /opt/local (Mac source install)
'/opt/local/include',
# Search /opt/local (Mac source install)
'/opt/local/include',
# Search /usr/local (Source install)
'/usr/local/include',
# Search /usr/local (Source install)
'/usr/local/include',
# Check the ruby install locations
INCLUDEDIR,
# Check the ruby install locations
INCLUDEDIR
]
LIB_DIRS = [
# Search /opt/local (Mac source install)
'/opt/local/lib',
# Search /opt/local (Mac source install)
'/opt/local/lib',
# Search /usr/local (Source install)
'/usr/local/lib',
# Search /usr/local (Source install)
'/usr/local/lib',
# Check the ruby install locations
LIBDIR,
# Check the ruby install locations
LIBDIR
]
def crash(msg)
print(" extconf failure: %s\n" % msg)
print(" extconf failure: #{msg}\n")
exit 1
end

@ -1,31 +1,34 @@
# encoding: utf-8
$:.push File.expand_path("../lib", __FILE__)
$LOAD_PATH.push File.expand_path('../lib', __FILE__)
require 'grpc/version'
Gem::Specification.new do |s|
s.name = "grpc"
s.name = 'grpc'
s.version = Google::RPC::VERSION
s.authors = ["One Platform Team"]
s.email = "stubby-team@google.com"
s.homepage = "http://go/grpc"
s.authors = ['One Platform Team']
s.email = 'stubby-team@google.com'
s.homepage = 'http://go/grpc'
s.summary = 'Google RPC system in Ruby'
s.description = 'Send RPCs from Ruby'
s.files = `git ls-files`.split("\n")
s.test_files = `git ls-files -- spec/*`.split("\n")
s.executables = `git ls-files -- examples/*.rb`.split("\n").map{ |f| File.basename(f) }
s.require_paths = ['lib' ]
s.executables = `git ls-files -- bin/*.rb`.split("\n").map do |f|
File.basename(f)
end
s.require_paths = ['lib']
s.platform = Gem::Platform::RUBY
s.add_dependency 'xray'
s.add_dependency 'logging', '~> 1.8'
s.add_dependency 'google-protobuf', '~> 3.0.0alpha.1.1'
s.add_dependency 'minitest', '~> 5.4' # not a dev dependency, used by the interop tests
s.add_dependency 'google-protobuf', '~> 3.0.0alpha'
s.add_dependency 'minitest', '~> 5.4' # reqd for interop tests
s.add_development_dependency "bundler", "~> 1.7"
s.add_development_dependency "rake", "~> 10.0"
s.add_development_dependency 'bundler', '~> 1.7'
s.add_development_dependency 'rake', '~> 10.0'
s.add_development_dependency 'rake-compiler', '~> 0'
s.add_development_dependency 'rspec', "~> 3.0"
s.add_development_dependency 'rubocop', '~> 0.28.0'
s.add_development_dependency 'rspec', '~> 3.0'
s.extensions = %w[ext/grpc/extconf.rb]
s.extensions = %w(ext/grpc/extconf.rb)
end

@ -29,25 +29,21 @@
require 'beefcake'
# Re-open the beefcake message module to add a static encode
#
# This is a temporary measure while beefcake is used as the default proto
# library for developing grpc ruby. Once that changes to the official proto
# library this can be removed. It's necessary to allow updating the service
# module to assume a static encode method.
#
# TODO(temiola): remove me, once official code generation is available in protoc
module Beefcake
# Re-open the beefcake message module to add a static encode
#
# This is a temporary measure while beefcake is used as the default proto
# library for developing grpc ruby. Once that changes to the official proto
# library this can be removed. It's necessary to allow updating the service
# module to assume a static encode method.
# TODO(temiola): remove this.
module Message
# additional mixin module that adds static encode method when include
module StaticEncode
# encodes o with its instance#encode method
def encode(o)
o.encode
end
end
# extend self.included in Beefcake::Message to include StaticEncode
@ -57,6 +53,5 @@ module Beefcake
o.extend Decode
o.send(:include, Encode)
end
end
end

@ -30,9 +30,12 @@
module Google
module RPC
module Core
class Event # Add an inspect method to C-defined Event class.
# Event is a class defined in the c extension
#
# Here, we add an inspect method.
class Event
def inspect
'<%s: type:%s, tag:%s result:%s>' % [self.class, type, tag, result]
"<#{self.class}: type:#{type}, tag:#{tag} result:#{result}>"
end
end
end

@ -32,9 +32,10 @@ require 'grpc'
module Google
module RPC
module Core
module TimeConsts # re-opens a module in the C extension.
# TimeConsts is a module from the C extension.
#
# Here it's re-opened to add a utility func.
module TimeConsts
# Converts a time delta to an absolute deadline.
#
# Assumes timeish is a relative time, and converts its to an absolute,
@ -48,24 +49,23 @@ module Google
# @param timeish [Number|TimeSpec]
# @return timeish [Number|TimeSpec]
def from_relative_time(timeish)
if timeish.is_a?TimeSpec
if timeish.is_a? TimeSpec
timeish
elsif timeish.nil?
TimeConsts::ZERO
elsif !timeish.is_a?Numeric
raise TypeError('Cannot make an absolute deadline from %s',
timeish.inspect)
elsif !timeish.is_a? Numeric
fail(TypeError,
"Cannot make an absolute deadline from #{timeish.inspect}")
elsif timeish < 0
TimeConsts::INFINITE_FUTURE
elsif timeish == 0
TimeConsts::ZERO
else !timeish.nil?
else
Time.now + timeish
end
end
module_function :from_relative_time
end
end
end

@ -30,9 +30,8 @@
require 'grpc'
module Google
# Google::RPC contains the General RPC module.
module RPC
# OutOfTime is an exception class that indicates that an RPC exceeded its
# deadline.
OutOfTime = Class.new(StandardError)
@ -42,12 +41,11 @@ module Google
# error should be returned to the other end of a GRPC connection; when
# caught it means that this end received a status error.
class BadStatus < StandardError
attr_reader :code, :details
# @param code [Numeric] the status code
# @param details [String] the details of the exception
def initialize(code, details='unknown cause')
def initialize(code, details = 'unknown cause')
super("#{code}:#{details}")
@code = code
@details = details
@ -60,9 +58,6 @@ module Google
def to_status
Status.new(code, details)
end
end
end
end

@ -31,519 +31,516 @@ require 'forwardable'
require 'grpc/generic/bidi_call'
def assert_event_type(ev, want)
raise OutOfTime if ev.nil?
fail OutOfTime if ev.nil?
got = ev.type
raise 'Unexpected rpc event: got %s, want %s' % [got, want] unless got == want
fail "Unexpected rpc event: got #{got}, want #{want}" unless got == want
end
module Google::RPC
# The ActiveCall class provides simple methods for sending marshallable
# data to a call
class ActiveCall
include Core::CompletionType
include Core::StatusCodes
include Core::TimeConsts
attr_reader(:deadline)
# client_start_invoke begins a client invocation.
#
# Flow Control note: this blocks until flow control accepts that client
# request can go ahead.
#
# deadline is the absolute deadline for the call.
#
# == Keyword Arguments ==
# any keyword arguments are treated as metadata to be sent to the server
# if a keyword value is a list, multiple metadata for it's key are sent
#
# @param call [Call] a call on which to start and invocation
# @param q [CompletionQueue] used to wait for INVOKE_ACCEPTED
# @param deadline [Fixnum,TimeSpec] the deadline for INVOKE_ACCEPTED
def self.client_start_invoke(call, q, deadline, **kw)
raise ArgumentError.new('not a call') unless call.is_a?Core::Call
if !q.is_a?Core::CompletionQueue
raise ArgumentError.new('not a CompletionQueue')
end
call.add_metadata(kw) if kw.length > 0
invoke_accepted, client_metadata_read = Object.new, Object.new
finished_tag = Object.new
call.start_invoke(q, invoke_accepted, client_metadata_read, finished_tag)
module Google
# Google::RPC contains the General RPC module.
module RPC
# The ActiveCall class provides simple methods for sending marshallable
# data to a call
class ActiveCall
include Core::CompletionType
include Core::StatusCodes
include Core::TimeConsts
attr_reader(:deadline)
# client_start_invoke begins a client invocation.
#
# Flow Control note: this blocks until flow control accepts that client
# request can go ahead.
#
# deadline is the absolute deadline for the call.
#
# == Keyword Arguments ==
# any keyword arguments are treated as metadata to be sent to the server
# if a keyword value is a list, multiple metadata for it's key are sent
#
# @param call [Call] a call on which to start and invocation
# @param q [CompletionQueue] used to wait for INVOKE_ACCEPTED
# @param deadline [Fixnum,TimeSpec] the deadline for INVOKE_ACCEPTED
def self.client_start_invoke(call, q, _deadline, **kw)
fail(ArgumentError, 'not a call') unless call.is_a? Core::Call
unless q.is_a? Core::CompletionQueue
fail(ArgumentError, 'not a CompletionQueue')
end
call.add_metadata(kw) if kw.length > 0
invoke_accepted, client_metadata_read = Object.new, Object.new
finished_tag = Object.new
call.start_invoke(q, invoke_accepted, client_metadata_read,
finished_tag)
# wait for the invocation to be accepted
ev = q.pluck(invoke_accepted, INFINITE_FUTURE)
fail OutOfTime if ev.nil?
ev.close
# wait for the invocation to be accepted
ev = q.pluck(invoke_accepted, INFINITE_FUTURE)
raise OutOfTime if ev.nil?
ev.close
[finished_tag, client_metadata_read]
end
[finished_tag, client_metadata_read]
end
# Creates an ActiveCall.
#
# ActiveCall should only be created after a call is accepted. That means
# different things on a client and a server. On the client, the call is
# accepted after call.start_invoke followed by receipt of the
# corresponding INVOKE_ACCEPTED. on the server, this is after
# call.accept.
#
# #initialize cannot determine if the call is accepted or not; so if a
# call that's not accepted is used here, the error won't be visible until
# the ActiveCall methods are called.
#
# deadline is the absolute deadline for the call.
#
# @param call [Call] the call used by the ActiveCall
# @param q [CompletionQueue] the completion queue used to accept
# the call
# @param marshal [Function] f(obj)->string that marshal requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [Fixnum] the deadline for the call to complete
# @param finished_tag [Object] the object used as the call's finish tag,
# if the call has begun
# @param read_metadata_tag [Object] the object used as the call's finish
# tag, if the call has begun
# @param started [true|false] indicates if the call has begun
def initialize(call, q, marshal, unmarshal, deadline, finished_tag: nil,
read_metadata_tag: nil, started: true)
# Fail fast on anything that is not a wrapped core Call/CompletionQueue so
# misuse surfaces at construction time rather than on the first operation.
fail(ArgumentError, 'not a call') unless call.is_a? Core::Call
unless q.is_a? Core::CompletionQueue
fail(ArgumentError, 'not a CompletionQueue')
end
@call = call
@cq = q
@deadline = deadline # absolute deadline for the call
@finished_tag = finished_tag # tag plucked to observe call completion
@read_metadata_tag = read_metadata_tag # tag for the initial metadata read
@marshal = marshal
@started = started # false => client_start_invoke has not yet been run
@unmarshal = unmarshal
end
# Creates an ActiveCall.
#
# ActiveCall should only be created after a call is accepted. That means
# different things on a client and a server. On the client, the call is
# accepted after call.start_invoke followed by receipt of the
# corresponding INVOKE_ACCEPTED. on the server, this is after
# call.accept.
#
# #initialize cannot determine if the call is accepted or not; so if a
# call that's not accepted is used here, the error won't be visible until
# the ActiveCall methods are called.
#
# deadline is the absolute deadline for the call.
#
# @param call [Call] the call used by the ActiveCall
# @param q [CompletionQueue] the completion queue used to accept
# the call
# @param marshal [Function] f(obj)->string that marshal requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [Fixnum] the deadline for the call to complete
# @param finished_tag [Object] the object used as the call's finish tag,
# if the call has begun
# @param read_metadata_tag [Object] the object used as the call's finish
# tag, if the call has begun
# @param started [true|false] (default true) indicates if the call has begun
def initialize(call, q, marshal, unmarshal, deadline, finished_tag: nil,
read_metadata_tag: nil, started: true)
raise ArgumentError.new('not a call') unless call.is_a?Core::Call
if !q.is_a?Core::CompletionQueue
raise ArgumentError.new('not a CompletionQueue')
# Obtains the status of the call.
#
# this value is nil until the call completes
# @return this call's status
def status
@call.status
end
@call = call
@cq = q
@deadline = deadline
@finished_tag = finished_tag
@read_metadata_tag = read_metadata_tag
@marshal = marshal
@started = started
@unmarshal = unmarshal
end
# Obtains the status of the call.
#
# this value is nil until the call completes
# @return this call's status
# Delegates to the wrapped core call; nil until the call completes.
def status
@call.status
end
# Obtains the metadata of the call.
#
# At the start of the call this will be nil. During the call this gets
# some values as soon as the other end of the connection acknowledges the
# request.
#
# @return this calls's metadata
# Delegates to the wrapped core call; nil until the peer acknowledges.
def metadata
@call.metadata
end
# Obtains the metadata of the call.
#
# At the start of the call this will be nil. During the call this gets
# some values as soon as the other end of the connection acknowledges the
# request.
#
# @return this calls's metadata
# Delegates to the wrapped core call; nil until the peer acknowledges.
def metadata
@call.metadata
end
# Cancels the call.
#
# Cancels the call. The call does not return any result, but once this it
# has been called, the call should eventually terminate. Due to potential
# races between the execution of the cancel and the in-flight request, the
# result of the call after calling #cancel is indeterminate:
#
# - the call may terminate with a BadStatus exception, with code=CANCELLED
# - the call may terminate with OK Status, and return a response
# - the call may terminate with a different BadStatus exception if that
# was happening
# Asks the core layer to cancel; termination is asynchronous (see above).
def cancel
@call.cancel
end
# Cancels the call.
#
# Cancels the call. The call does not return any result, but once this it
# has been called, the call should eventually terminate. Due to potential
# races between the execution of the cancel and the in-flight request, the
# result of the call after calling #cancel is indeterminate:
#
# - the call may terminate with a BadStatus exception, with code=CANCELLED
# - the call may terminate with OK Status, and return a response
# - the call may terminate with a different BadStatus exception if that was
# happening
# Asks the core layer to cancel; termination is asynchronous (see above).
def cancel
@call.cancel
end
# indicates if the call is shutdown
# indicates if the call is shutdown
# NOTE(review): nothing in this chunk sets @shutdown to true -- confirm a
# writer exists elsewhere in the class.
def shutdown
@shutdown ||= false
end
# indicates if the call is shutdown
# indicates if the call is shutdown
# NOTE(review): nothing in this chunk sets @shutdown to true -- confirm a
# writer exists elsewhere in the class.
def shutdown
@shutdown ||= false
end
# indicates if the call is cancelled.
# indicates if the call is cancelled.
# NOTE(review): nothing in this chunk sets @cancelled to true -- confirm a
# writer exists elsewhere in the class.
def cancelled
@cancelled ||= false
end
# indicates if the call is cancelled.
# indicates if the call is cancelled.
# NOTE(review): nothing in this chunk sets @cancelled to true -- confirm a
# writer exists elsewhere in the class.
def cancelled
@cancelled ||= false
end
# multi_req_view provides a restricted view of this ActiveCall for use
# in a server client-streaming handler.
# Wraps self in the restricted MultiReqView (client-streaming handlers).
def multi_req_view
MultiReqView.new(self)
end
# multi_req_view provides a restricted view of this ActiveCall for use
# in a server client-streaming handler.
# Wraps self in the restricted MultiReqView (client-streaming handlers).
def multi_req_view
MultiReqView.new(self)
end
# single_req_view provides a restricted view of this ActiveCall for use in
# a server request-response handler.
# Wraps self in the restricted SingleReqView (request-response handlers).
def single_req_view
SingleReqView.new(self)
end
# single_req_view provides a restricted view of this ActiveCall for use in
# a server request-response handler.
# Wraps self in the restricted SingleReqView (request-response handlers).
def single_req_view
SingleReqView.new(self)
end
# operation provides a restricted view of this ActiveCall for use as
# a Operation.
# Wraps self in the restricted Operation view used on the client.
def operation
Operation.new(self)
end
# operation provides a restricted view of this ActiveCall for use as
# a Operation.
# Wraps self in the restricted Operation view used on the client.
def operation
Operation.new(self)
end
# writes_done indicates that all writes are completed.
#
# It blocks until the remote endpoint acknowledges by sending a FINISHED
# event, unless assert_finished is set to false. Any calls to
# #remote_send after this call will fail.
#
# @param assert_finished [true, false] when true(default), waits for
# FINISHED.
def writes_done(assert_finished = true)
@call.writes_done(self)
ev = @cq.pluck(self, INFINITE_FUTURE)
begin
assert_event_type(ev, FINISH_ACCEPTED)
logger.debug("Writes done: waiting for finish? #{assert_finished}")
ensure
ev.close
end
# writes_done indicates that all writes are completed.
#
# It blocks until the remote endpoint acknowledges by sending a FINISHED
# event, unless assert_finished is set to false. Any calls to
# #remote_send after this call will fail.
#
# @param assert_finished [true, false] when true(default), waits for
# FINISHED.
def writes_done(assert_finished=true)
@call.writes_done(self)
ev = @cq.pluck(self, INFINITE_FUTURE)
begin
assert_event_type(ev, FINISH_ACCEPTED)
logger.debug("Writes done: waiting for finish? #{assert_finished}")
ensure
return unless assert_finished
ev = @cq.pluck(@finished_tag, INFINITE_FUTURE)
fail 'unexpected nil event' if ev.nil?
ev.close
@call.status
end
if assert_finished
# finished waits until the call is completed.
#
# It blocks until the remote endpoint acknowledges by sending a FINISHED
# event.
def finished
ev = @cq.pluck(@finished_tag, INFINITE_FUTURE)
raise "unexpected event: #{ev.inspect}" if ev.nil?
ev.close
return @call.status
begin
fail "unexpected event: #{ev.inspect}" unless ev.type == FINISHED
if @call.metadata.nil?
@call.metadata = ev.result.metadata
else
@call.metadata.merge!(ev.result.metadata)
end
if ev.result.code != Core::StatusCodes::OK
fail BadStatus.new(ev.result.code, ev.result.details)
end
res = ev.result
ensure
ev.close
end
res
end
end
# finished waits until the call is completed.
#
# It blocks until the remote endpoint acknowledges by sending a FINISHED
# event.
def finished
ev = @cq.pluck(@finished_tag, INFINITE_FUTURE)
begin
raise "unexpected event: #{ev.inspect}" unless ev.type == FINISHED
if @call.metadata.nil?
@call.metadata = ev.result.metadata
# remote_send sends a request to the remote endpoint.
#
# It blocks until the remote endpoint acknowledges by sending a
# WRITE_ACCEPTED. req can be marshalled already.
#
# @param req [Object, String] the object to send or it's marshal form.
# @param marshalled [false, true] indicates if the object is already
# marshalled.
def remote_send(req, marshalled = false)
assert_queue_is_ready
logger.debug("sending #{req.inspect}, marshalled? #{marshalled}")
if marshalled
payload = req
else
@call.metadata.merge!(ev.result.metadata)
payload = @marshal.call(req)
end
if ev.result.code != Core::StatusCodes::OK
raise BadStatus.new(ev.result.code, ev.result.details)
@call.start_write(Core::ByteBuffer.new(payload), self)
# call queue#pluck, and wait for WRITE_ACCEPTED, so as not to return
# until the flow control allows another send on this call.
ev = @cq.pluck(self, INFINITE_FUTURE)
begin
assert_event_type(ev, WRITE_ACCEPTED)
ensure
ev.close
end
res = ev.result
ensure
ev.close
end
res
end
# remote_send sends a request to the remote endpoint.
#
# It blocks until the remote endpoint acknowledges by sending a
# WRITE_ACCEPTED. req can be marshalled already.
#
# @param req [Object, String] the object to send or it's marshal form.
# @param marshalled [false, true] indicates if the object is already
# marshalled.
def remote_send(req, marshalled=false)
assert_queue_is_ready
logger.debug("sending payload #{req.inspect}, marshalled? #{marshalled}")
if marshalled
payload = req
else
payload = @marshal.call(req)
end
@call.start_write(Core::ByteBuffer.new(payload), self)
# call queue#pluck, and wait for WRITE_ACCEPTED, so as not to return
# until the flow control allows another send on this call.
ev = @cq.pluck(self, INFINITE_FUTURE)
begin
assert_event_type(ev, WRITE_ACCEPTED)
ensure
ev.close
# send_status sends a status to the remote endpoint
#
# @param code [int] the status code to send
# @param details [String] details
# @param assert_finished [true, false] when true(default), waits for
# FINISHED.
def send_status(code = OK, details = '', assert_finished = false)
assert_queue_is_ready
@call.start_write_status(code, details, self)
# Block until the core layer acknowledges the status write.
ev = @cq.pluck(self, INFINITE_FUTURE)
begin
assert_event_type(ev, FINISH_ACCEPTED)
ensure
ev.close
end
logger.debug("Status sent: #{code}:'#{details}'")
# Optionally wait for the call to fully finish, returning its result.
return finished if assert_finished
nil
end
end
# send_status sends a status to the remote endpoint
#
# @param code [int] the status code to send
# @param details [String] details
# @param assert_finished [true, false] when true(default), waits for
# FINISHED.
# Sends a status to the remote endpoint, acknowledged via FINISH_ACCEPTED.
#
# @param code [int] the status code to send
# @param details [String] extra detail text for the status
# @param assert_finished [true, false] when true, also waits for FINISHED
#   and returns the finish result; otherwise returns nil
def send_status(code=OK, details='', assert_finished=false)
assert_queue_is_ready
@call.start_write_status(code, details, self)
acked = @cq.pluck(self, INFINITE_FUTURE)
begin
assert_event_type(acked, FINISH_ACCEPTED)
ensure
acked.close
end
logger.debug("Status sent: #{code}:'#{details}'")
return finished if assert_finished
nil
end
# remote_read reads a response from the remote endpoint.
#
# It blocks until the remote endpoint sends a READ or FINISHED event. On
# a READ, it returns the response after unmarshalling it. On
# FINISHED, it returns nil if the status is OK, otherwise raising BadStatus
def remote_read
if @call.metadata.nil? && !@read_metadata_tag.nil?
ev = @cq.pluck(@read_metadata_tag, INFINITE_FUTURE)
assert_event_type(ev, CLIENT_METADATA_READ)
@call.metadata = ev.result
@read_metadata_tag = nil
end
# remote_read reads a response from the remote endpoint.
#
# It blocks until the remote endpoint sends a READ or FINISHED event. On
# a READ, it returns the response after unmarshalling it. On
# FINISHED, it returns nil if the status is OK, otherwise raising
# BadStatus
def remote_read
if @call.metadata.nil? && !@read_metadata_tag.nil?
ev = @cq.pluck(@read_metadata_tag, INFINITE_FUTURE)
assert_event_type(ev, CLIENT_METADATA_READ)
@call.metadata = ev.result
@read_metadata_tag = nil
end
@call.start_read(self)
ev = @cq.pluck(self, INFINITE_FUTURE)
begin
assert_event_type(ev, READ)
logger.debug("received req: #{ev.result.inspect}")
if !ev.result.nil?
logger.debug("received req.to_s: #{ev.result.to_s}")
res = @unmarshal.call(ev.result.to_s)
logger.debug("received_req (unmarshalled): #{res.inspect}")
return res
@call.start_read(self)
ev = @cq.pluck(self, INFINITE_FUTURE)
begin
assert_event_type(ev, READ)
logger.debug("received req: #{ev.result.inspect}")
unless ev.result.nil?
logger.debug("received req.to_s: #{ev.result}")
res = @unmarshal.call(ev.result.to_s)
logger.debug("received_req (unmarshalled): #{res.inspect}")
return res
end
ensure
ev.close
end
ensure
ev.close
logger.debug('found nil; the final response has been sent')
nil
end
logger.debug('found nil; the final response has been sent')
nil
end
# each_remote_read passes each response to the given block or returns an
# enumerator the responses if no block is given.
#
# == Enumerator ==
#
# * #next blocks until the remote endpoint sends a READ or FINISHED
# * for each read, enumerator#next yields the response
# * on status
# * if it's is OK, enumerator#next raises StopException
# * if is not OK, enumerator#next raises RuntimeException
#
# == Block ==
#
# * if provided it is executed for each response
# * the call blocks until no more responses are provided
#
# @return [Enumerator] if no block was given
def each_remote_read
return enum_for(:each_remote_read) if !block_given?
loop do
resp = remote_read()
break if resp.is_a?Struct::Status # is an OK status, bad statii raise
break if resp.nil? # the last response was received
yield resp
# each_remote_read passes each response to the given block or returns an
# enumerator the responses if no block is given.
#
# == Enumerator ==
#
# * #next blocks until the remote endpoint sends a READ or FINISHED
# * for each read, enumerator#next yields the response
# * on status
# * if it's is OK, enumerator#next raises StopException
# * if is not OK, enumerator#next raises RuntimeException
#
# == Block ==
#
# * if provided it is executed for each response
# * the call blocks until no more responses are provided
#
# @return [Enumerator] if no block was given
# Yields each streamed response to the block, or returns an Enumerator over
# the responses when no block is supplied. Iteration ends on the final nil
# response or on an OK status struct (bad statuses raise in #remote_read).
def each_remote_read
return enum_for(:each_remote_read) unless block_given?
resp = remote_read
until resp.nil? || resp.is_a?(Struct::Status)
yield resp
resp = remote_read
end
end
end
# each_remote_read_then_finish passes each response to the given block or
# returns an enumerator of the responses if no block is given.
#
# It is like each_remote_read, but it blocks on finishing on detecting
# the final message.
#
# == Enumerator ==
#
# * #next blocks until the remote endpoint sends a READ or FINISHED
# * for each read, enumerator#next yields the response
# * on status
# * if it's is OK, enumerator#next raises StopException
# * if is not OK, enumerator#next raises RuntimeException
#
# == Block ==
#
# * if provided it is executed for each response
# * the call blocks until no more responses are provided
#
# @return [Enumerator] if no block was given
def each_remote_read_then_finish
return enum_for(:each_remote_read_then_finish) if !block_given?
loop do
resp = remote_read
break if resp.is_a?Struct::Status # is an OK status, bad statii raise
if resp.nil? # the last response was received, but not finished yet
finished
break
# each_remote_read_then_finish passes each response to the given block or
# returns an enumerator of the responses if no block is given.
#
# It is like each_remote_read, but it blocks on finishing on detecting
# the final message.
#
# == Enumerator ==
#
# * #next blocks until the remote endpoint sends a READ or FINISHED
# * for each read, enumerator#next yields the response
# * on status
# * if it's is OK, enumerator#next raises StopException
# * if is not OK, enumerator#next raises RuntimeException
#
# == Block ==
#
# * if provided it is executed for each response
# * the call blocks until no more responses are provided
#
# @return [Enumerator] if no block was given
def each_remote_read_then_finish
return enum_for(:each_remote_read_then_finish) unless block_given?
loop do
resp = remote_read
break if resp.is_a? Struct::Status # is an OK status
if resp.nil? # the last response was received, but not finished yet
finished
break
end
yield resp
end
yield resp
end
end
# request_response sends a request to a GRPC server, and returns the
# response.
#
# == Keyword Arguments ==
# any keyword arguments are treated as metadata to be sent to the server
# if a keyword value is a list, multiple metadata for it's key are sent
#
# @param req [Object] the request sent to the server
# @return [Object] the response received from the server
def request_response(req, **kw)
start_call(**kw) unless @started
remote_send(req)
writes_done(false)
response = remote_read
if !response.is_a?(Struct::Status) # finish if status not yet received
finished
# request_response sends a request to a GRPC server, and returns the
# response.
#
# == Keyword Arguments ==
# any keyword arguments are treated as metadata to be sent to the server
# if a keyword value is a list, multiple metadata for it's key are sent
#
# @param req [Object] the request sent to the server
# @return [Object] the response received from the server
# Sends a single request to the server and returns its single response.
#
# Keyword arguments are sent to the server as metadata via #start_call.
#
# @param req [Object] the request sent to the server
# @return [Object] the response received from the server
def request_response(req, **kw)
start_call(**kw) unless @started
remote_send(req)
writes_done(false)
remote_read.tap do |resp|
# a status struct means the call already finished; otherwise wait for it
finished unless resp.is_a? Struct::Status
end
end
response
end
# client_streamer sends a stream of requests to a GRPC server, and
# returns a single response.
#
# requests provides an 'iterable' of Requests. I.e. it follows Ruby's
# #each enumeration protocol. In the simplest case, requests will be an
# array of marshallable objects; in typical case it will be an Enumerable
# that allows dynamic construction of the marshallable objects.
#
# == Keyword Arguments ==
# any keyword arguments are treated as metadata to be sent to the server
# if a keyword value is a list, multiple metadata for it's key are sent
#
# @param requests [Object] an Enumerable of requests to send
# @return [Object] the response received from the server
def client_streamer(requests, **kw)
start_call(**kw) unless @started
requests.each { |r| remote_send(r) }
writes_done(false)
response = remote_read
if !response.is_a?(Struct::Status) # finish if status not yet received
finished
# client_streamer sends a stream of requests to a GRPC server, and
# returns a single response.
#
# requests provides an 'iterable' of Requests. I.e. it follows Ruby's
# #each enumeration protocol. In the simplest case, requests will be an
# array of marshallable objects; in typical case it will be an Enumerable
# that allows dynamic construction of the marshallable objects.
#
# == Keyword Arguments ==
# any keyword arguments are treated as metadata to be sent to the server
# if a keyword value is a list, multiple metadata for it's key are sent
#
# @param requests [Object] an Enumerable of requests to send
# @return [Object] the response received from the server
# Streams each request from the given Enumerable to the server, then
# returns the server's single response.
#
# Keyword arguments are sent to the server as metadata via #start_call.
#
# @param requests [Object] an Enumerable of requests to send
# @return [Object] the response received from the server
def client_streamer(requests, **kw)
start_call(**kw) unless @started
requests.each do |req|
remote_send(req)
end
writes_done(false)
remote_read.tap do |resp|
# a status struct means the call already finished; otherwise wait for it
finished unless resp.is_a? Struct::Status
end
end
response
end
# server_streamer sends one request to the GRPC server, which yields a
# stream of responses.
#
# responses provides an enumerator over the streamed responses, i.e. it
# follows Ruby's #each iteration protocol. The enumerator blocks while
# waiting for each response, stops when the server signals that no
# further responses will be supplied. If the implicit block is provided,
# it is executed with each response as the argument and no result is
# returned.
#
# == Keyword Arguments ==
# any keyword arguments are treated as metadata to be sent to the server
# if a keyword value is a list, multiple metadata for it's key are sent
# any keyword arguments are treated as metadata to be sent to the server.
#
# @param req [Object] the request sent to the server
# @return [Enumerator|nil] a response Enumerator
# server_streamer sends one request to the GRPC server, which yields a
# stream of responses.
#
# Any keyword arguments are treated as metadata to be sent to the server.
#
# @param req [Object] the request sent to the server
# @return [Enumerator|nil] an Enumerator over the replies when no block is
#   given; otherwise each reply is yielded to the block and nil is returned
def server_streamer(req, **kw)
start_call(**kw) unless @started
remote_send(req)
writes_done(false) # half-close: all writes are completed
replies = enum_for(:each_remote_read_then_finish)
# `unless` instead of `if !` for consistency with the rubocop'd siblings
return replies unless block_given?
replies.each { |r| yield r }
end
# server_streamer sends one request to the GRPC server, which yields a
# stream of responses.
#
# responses provides an enumerator over the streamed responses, i.e. it
# follows Ruby's #each iteration protocol. The enumerator blocks while
# waiting for each response, stops when the server signals that no
# further responses will be supplied. If the implicit block is provided,
# it is executed with each response as the argument and no result is
# returned.
#
# == Keyword Arguments ==
# any keyword arguments are treated as metadata to be sent to the server
# if a keyword value is a list, multiple metadata for it's key are sent
# any keyword arguments are treated as metadata to be sent to the server.
#
# @param req [Object] the request sent to the server
# @return [Enumerator|nil] a response Enumerator
def server_streamer(req, **kw)
start_call(**kw) unless @started # lazily begin the call on first use
remote_send(req)
writes_done(false) # half-close: all writes are completed
replies = enum_for(:each_remote_read_then_finish)
return replies unless block_given?
replies.each { |r| yield r }
end
# bidi_streamer sends a stream of requests to the GRPC server, and yields
# a stream of responses.
#
# This method takes an Enumerable of requests, and returns and enumerable
# of responses.
#
# == requests ==
#
# requests provides an 'iterable' of Requests. I.e. it follows Ruby's #each
# enumeration protocol. In the simplest case, requests will be an array of
# marshallable objects; in typical case it will be an Enumerable that
# allows dynamic construction of the marshallable objects.
#
# == responses ==
#
# This is an enumerator of responses. I.e, its #next method blocks
# waiting for the next response. Also, if at any point the block needs
# to consume all the remaining responses, this can be done using #each or
# #collect. Calling #each or #collect should only be done if
# the_call#writes_done has been called, otherwise the block will loop
# forever.
#
# == Keyword Arguments ==
# any keyword arguments are treated as metadata to be sent to the server
# if a keyword value is a list, multiple metadata for it's key are sent
#
# @param requests [Object] an Enumerable of requests to send
# @return [Enumerator, nil] a response Enumerator
def bidi_streamer(requests, **kw, &blk)
start_call(**kw) unless @started
# Delegate the read/write orchestration to a BidiCall sharing this call's
# core call, completion queue, codecs and deadline.
bd = BidiCall.new(@call, @cq, @marshal, @unmarshal, @deadline,
@finished_tag)
bd.run_on_client(requests, &blk)
end
# bidi_streamer sends a stream of requests to the GRPC server, and yields
# a stream of responses.
#
# This method takes an Enumerable of requests, and returns and enumerable
# of responses.
#
# == requests ==
#
# requests provides an 'iterable' of Requests. I.e. it follows Ruby's
# #each enumeration protocol. In the simplest case, requests will be an
# array of marshallable objects; in typical case it will be an
# Enumerable that allows dynamic construction of the marshallable
# objects.
#
# == responses ==
#
# This is an enumerator of responses. I.e, its #next method blocks
# waiting for the next response. Also, if at any point the block needs
# to consume all the remaining responses, this can be done using #each or
# #collect. Calling #each or #collect should only be done if
# the_call#writes_done has been called, otherwise the block will loop
# forever.
#
# == Keyword Arguments ==
# any keyword arguments are treated as metadata to be sent to the server
# if a keyword value is a list, multiple metadata for it's key are sent
#
# @param requests [Object] an Enumerable of requests to send
# @return [Enumerator, nil] a response Enumerator
def bidi_streamer(requests, **kw, &blk)
start_call(**kw) unless @started
# Delegate the read/write orchestration to a BidiCall sharing this call's
# core call, completion queue, codecs and deadline.
bd = BidiCall.new(@call, @cq, @marshal, @unmarshal, @deadline,
@finished_tag)
bd.run_on_client(requests, &blk)
end
# run_server_bidi orchestrates a BiDi stream processing on a server.
#
# N.B. gen_each_reply is a func(Enumerable<Requests>)
#
# It takes an enumerable of requests as an arg, in case there is a
# relationship between the stream of requests and the stream of replies.
#
# This does not mean that must necessarily be one. E.g, the replies
# produced by gen_each_reply could ignore the received_msgs
#
# @param gen_each_reply [Proc] generates the BiDi stream replies
def run_server_bidi(gen_each_reply)
# Delegate to a BidiCall sharing this call's core state; gen_each_reply
# maps the request stream to the reply stream.
bd = BidiCall.new(@call, @cq, @marshal, @unmarshal, @deadline,
@finished_tag)
bd.run_on_server(gen_each_reply)
end
# run_server_bidi orchestrates a BiDi stream processing on a server.
#
# N.B. gen_each_reply is a func(Enumerable<Requests>)
#
# It takes an enumerable of requests as an arg, in case there is a
# relationship between the stream of requests and the stream of replies.
#
# This does not mean that must necessarily be one. E.g, the replies
# produced by gen_each_reply could ignore the received_msgs
#
# @param gen_each_reply [Proc] generates the BiDi stream replies
def run_server_bidi(gen_each_reply)
# Delegate to a BidiCall sharing this call's core state; gen_each_reply
# maps the request stream to the reply stream.
bd = BidiCall.new(@call, @cq, @marshal, @unmarshal, @deadline,
@finished_tag)
bd.run_on_server(gen_each_reply)
end
private
private
# Begins the call on the core layer; keyword args become request metadata.
# Records the returned finish/metadata tags and marks the call as started.
def start_call(**kw)
tags = ActiveCall.client_start_invoke(@call, @cq, @deadline, **kw)
@finished_tag, @read_metadata_tag = tags
@started = true
end
# Begins the call on the core layer; keyword args become request metadata.
# Records the returned finish/metadata tags and marks the call as started.
def start_call(**kw)
tags = ActiveCall.client_start_invoke(@call, @cq, @deadline, **kw)
@finished_tag, @read_metadata_tag = tags
@started = true
end
def self.view_class(*visible_methods)
Class.new do
extend ::Forwardable
def_delegators :@wrapped, *visible_methods
def self.view_class(*visible_methods)
Class.new do
extend ::Forwardable
def_delegators :@wrapped, *visible_methods
# @param wrapped [ActiveCall] the call whose methods are shielded
def initialize(wrapped)
@wrapped = wrapped
# @param wrapped [ActiveCall] the call whose methods are shielded
def initialize(wrapped)
@wrapped = wrapped
end
end
end
end
# SingleReqView limits access to an ActiveCall's methods for use in server
# handlers that receive just one request.
SingleReqView = view_class(:cancelled, :deadline)
# MultiReqView limits access to an ActiveCall's methods for use in
# server client_streamer handlers.
MultiReqView = view_class(:cancelled, :deadline, :each_queued_msg,
:each_remote_read)
# Operation limits access to an ActiveCall's methods for use as
# a Operation on the client.
Operation = view_class(:cancel, :cancelled, :deadline, :execute, :metadata,
:status)
# confirms that no events are enqueued, and that the queue is not
# shutdown.
def assert_queue_is_ready
ev = nil
begin
ev = @cq.pluck(self, ZERO)
raise "unexpected event #{ev.inspect}" unless ev.nil?
rescue OutOfTime
# expected, nothing should be on the queue and the deadline was ZERO,
# except things using another tag
ensure
ev.close unless ev.nil?
# SingleReqView limits access to an ActiveCall's methods for use in server
# handlers that receive just one request.
SingleReqView = view_class(:cancelled, :deadline)
# MultiReqView limits access to an ActiveCall's methods for use in
# server client_streamer handlers.
MultiReqView = view_class(:cancelled, :deadline, :each_queued_msg,
:each_remote_read)
# Operation limits access to an ActiveCall's methods for use as
# a Operation on the client.
Operation = view_class(:cancel, :cancelled, :deadline, :execute,
:metadata, :status)
# confirms that no events are enqueued, and that the queue is not
# shutdown.
# confirms that no events are enqueued for this call's tag, and that the
# queue is not shutdown.
def assert_queue_is_ready
ev = nil
begin
# a zero deadline: anything already queued for this tag is unexpected
ev = @cq.pluck(self, ZERO)
fail "unexpected event #{ev.inspect}" unless ev.nil?
rescue OutOfTime
# expected, nothing should be on the queue and the deadline was ZERO,
# except things using another tag
logger.debug('timed out waiting for next event') # was `logging.debug`: NameError
ensure
ev.close unless ev.nil?
end
end
end
end
end

require 'forwardable'
require 'grpc/grpc'
# Asserts that ev is a non-nil rpc event of the wanted type.
#
# The merge left both the old `raise` guards and the new `fail` guards in
# place (each check duplicated); deduplicated to the rubocop'd form.
#
# @param ev [Object] the completion-queue event to check
# @param want [Object] the expected event type
# @raise [OutOfTime] if ev is nil
# @raise [RuntimeError] if ev.type is not the wanted type
def assert_event_type(ev, want)
fail OutOfTime if ev.nil?
got = ev.type
fail("Unexpected rpc event: got #{got}, want #{want}") unless got == want
end
module Google::RPC
# The BiDiCall class orchestrates exection of a BiDi stream on a client or
# server.
class BidiCall
include Core::CompletionType
include Core::StatusCodes
include Core::TimeConsts
# Creates a BidiCall.
#
# BidiCall should only be created after a call is accepted. That means
# different things on a client and a server. On the client, the call is
# accepted after call.start_invoke followed by receipt of the corresponding
# INVOKE_ACCEPTED. On the server, this is after call.accept.
#
# #initialize cannot determine if the call is accepted or not; so if a
# call that's not accepted is used here, the error won't be visible until
# the BidiCall#run is called.
#
# deadline is the absolute deadline for the call.
#
# @param call [Call] the call used by the ActiveCall
# @param q [CompletionQueue] the completion queue used to accept
# the call
# @param marshal [Function] f(obj)->string that marshal requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [Fixnum] the deadline for the call to complete
# @param finished_tag [Object] the object used as the call's finish tag,
def initialize(call, q, marshal, unmarshal, deadline, finished_tag)
raise ArgumentError.new('not a call') unless call.is_a?Core::Call
if !q.is_a?Core::CompletionQueue
raise ArgumentError.new('not a CompletionQueue')
module Google
# Google::RPC contains the General RPC module.
module RPC
# The BiDiCall class orchestrates exection of a BiDi stream on a client or
# server.
class BidiCall
include Core::CompletionType
include Core::StatusCodes
include Core::TimeConsts
# Creates a BidiCall.
#
# BidiCall should only be created after a call is accepted. That means
# different things on a client and a server. On the client, the call is
# accepted after call.start_invoke followed by receipt of the
# corresponding INVOKE_ACCEPTED. On the server, this is after
# call.accept.
#
# #initialize cannot determine if the call is accepted or not; so if a
# call that's not accepted is used here, the error won't be visible until
# the BidiCall#run is called.
#
# deadline is the absolute deadline for the call.
#
# @param call [Call] the call used by the ActiveCall
# @param q [CompletionQueue] the completion queue used to accept
# the call
# @param marshal [Function] f(obj)->string that marshal requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [Fixnum] the deadline for the call to complete
# @param finished_tag [Object] the object used as the call's finish tag,
def initialize(call, q, marshal, unmarshal, deadline, finished_tag)
# Fail fast on non-core types so misuse is caught at construction time.
fail(ArgumentError, 'not a call') unless call.is_a? Core::Call
unless q.is_a? Core::CompletionQueue
fail(ArgumentError, 'not a CompletionQueue')
end
@call = call
@cq = q
@deadline = deadline
@finished_tag = finished_tag
@marshal = marshal
@readq = Queue.new # filled by the read loop, drained by #each_queued_msg
@unmarshal = unmarshal
end
@call = call
@cq = q
@deadline = deadline
@finished_tag = finished_tag
@marshal = marshal
@readq = Queue.new
@unmarshal = unmarshal
end
# Begins orchestration of the Bidi stream for a client sending requests.
#
# The method either returns an Enumerator of the responses, or accepts a
# block that can be invoked with each response.
#
# @param requests the Enumerable of requests to send
# @return an Enumerator of requests to yield
def run_on_client(requests, &blk)
# Writes run on their own thread so reads are never blocked waiting for
# the production of requests.
enq_th = start_write_loop(requests)
loop_th = start_read_loop
replies = each_queued_msg
# NOTE(review): the early return hands back the Enumerator without
# joining the threads -- confirm this is intended.
return replies if blk.nil?
replies.each { |r| blk.call(r) }
enq_th.join
loop_th.join
end
# Begins orchestration of the Bidi stream for a server generating replies.
#
# N.B. gen_each_reply is a func(Enumerable<Requests>)
#
# It takes an enumerable of requests as an arg, in case there is a
# relationship between the stream of requests and the stream of replies.
#
# This does not mean that must necessarily be one. E.g, the replies
# produced by gen_each_reply could ignore the received_msgs
#
# @param gen_each_reply [Proc] generates the BiDi stream replies.
def run_on_server(gen_each_reply)
# gen_each_reply maps the (lazily read) request stream to the replies.
replys = gen_each_reply.call(each_queued_msg)
enq_th = start_write_loop(replys, is_client:false)
loop_th = start_read_loop()
# Wait for both loops: reads complete first, then the write loop drains.
loop_th.join
enq_th.join
end
private
# Begins orchestration of the Bidi stream for a client sending requests.
#
# The method either returns an Enumerator of the responses, or accepts a
# block that can be invoked with each response.
#
# @param requests the Enumerable of requests to send
# @return an Enumerator of requests to yield
def run_on_client(requests, &blk)
# Writes run on their own thread so reads are never blocked waiting for
# the production of requests.
enq_th = start_write_loop(requests)
loop_th = start_read_loop
replies = each_queued_msg
# NOTE(review): the early return hands back the Enumerator without
# joining the threads -- confirm this is intended.
return replies if blk.nil?
replies.each { |r| blk.call(r) }
enq_th.join
loop_th.join
end
END_OF_READS = :end_of_reads
END_OF_WRITES = :end_of_writes
# Begins orchestration of the Bidi stream for a server generating replies.
#
# N.B. gen_each_reply is a func(Enumerable<Requests>)
#
# It takes an enumerable of requests as an arg, in case there is a
# relationship between the stream of requests and the stream of replies.
#
# This does not mean that must necessarily be one. E.g, the replies
# produced by gen_each_reply could ignore the received_msgs
#
# @param gen_each_reply [Proc] generates the BiDi stream replies.
def run_on_server(gen_each_reply)
  # hand the reply generator a lazy enumerator over the received messages
  replys = gen_each_reply.call(each_queued_msg)
  enq_th = start_write_loop(replys, is_client: false)
  loop_th = start_read_loop
  loop_th.join
  enq_th.join
end
# each_queued_msg yields each message on this instances readq
#
# - messages are added to the readq by #read_loop
# - iteration ends when the instance itself is added
def each_queued_msg
  return enum_for(:each_queued_msg) unless block_given?
  count = 0
  loop do
    # count is used for debug logging only
    logger.debug("each_queued_msg: msg##{count}")
    count += 1
    req = @readq.pop
    # a StandardError on the queue means the read loop failed; re-raise it so
    # the consumer sees the failure.  ('throw' here would not re-raise: with
    # no matching 'catch' it raises a different error entirely.)
    fail req if req.is_a? StandardError
    break if req.equal?(END_OF_READS)
    yield req
  end
end
private
END_OF_READS = :end_of_reads
END_OF_WRITES = :end_of_writes
# each_queued_msg yields each message on this instances readq
#
# - messages are added to the readq by #read_loop
# - iteration ends when the instance itself is added
def each_queued_msg
  return enum_for(:each_queued_msg) unless block_given?
  count = 0
  loop do
    # count is used for debug logging only
    logger.debug("each_queued_msg: msg##{count}")
    count += 1
    req = @readq.pop
    # NOTE(review): 'throw' with no matching 'catch' will not re-raise req —
    # this looks like it was meant to be 'fail req'; confirm intent.
    throw req if req.is_a? StandardError
    break if req.equal?(END_OF_READS)
    yield req
  end
end
end
# during bidi-streaming, read the requests to send from a separate thread
# read so that read_loop does not block waiting for requests to read.
#
# @param requests [Enumerable] the messages to write to the call
# @param is_client [true|false] when true, half-closes the call and waits
#   for the FINISHED event after the requests are exhausted
# @return [Thread] the thread running the write loop
def start_write_loop(requests, is_client: true)
  Thread.new do  # TODO(temiola) run on a thread pool
    write_tag = Object.new
    begin
      count = 0
      requests.each do |req|
        count += 1
        payload = @marshal.call(req)
        @call.start_write(Core::ByteBuffer.new(payload), write_tag)
        ev = @cq.pluck(write_tag, INFINITE_FUTURE)
        begin
          assert_event_type(ev, WRITE_ACCEPTED)
        ensure
          ev.close
        end
      end
      if is_client
        # clients half-close the call, then wait for the server to finish
        @call.writes_done(write_tag)
        ev = @cq.pluck(write_tag, INFINITE_FUTURE)
        begin
          assert_event_type(ev, FINISH_ACCEPTED)
        ensure
          ev.close
        end
        logger.debug("bidi-client: sent #{count} reqs, waiting to finish")
        ev = @cq.pluck(@finished_tag, INFINITE_FUTURE)
        begin
          assert_event_type(ev, FINISHED)
        ensure
          ev.close
        end
        logger.debug('bidi-client: finished received')
      end
    rescue StandardError => e
      logger.warn('bidi: write_loop failed')
      logger.warn(e)
    end
  end
end
# starts the read loop
#
# Reads messages off the call until a nil result signals the end of reads;
# each unmarshalled message is pushed onto @readq.  On failure the error
# itself is pushed so #each_queued_msg terminates with it.
#
# @return [Thread] the thread running the read loop
def start_read_loop
  Thread.new do
    begin
      read_tag = Object.new
      count = 0
      # queue the initial read before beginning the loop
      loop do
        logger.debug("waiting for read #{count}")
        count += 1
        @call.start_read(read_tag)
        ev = @cq.pluck(read_tag, INFINITE_FUTURE)
        begin
          assert_event_type(ev, READ)
          # handle the next event: a nil result means no more reads
          if ev.result.nil?
            @readq.push(END_OF_READS)
            logger.debug('done reading!')
            break
          end
          # push the latest read onto the queue and continue reading
          logger.debug("received req: #{ev.result}")
          res = @unmarshal.call(ev.result.to_s)
          @readq.push(res)
        ensure
          ev.close
        end
      end
    rescue StandardError => e
      logger.warn('bidi: read_loop failed')
      logger.warn(e)
      @readq.push(e)  # let each_queued_msg terminate with this error
    end
  end
end
end
end
end

@ -30,377 +30,381 @@
require 'grpc/generic/active_call'
require 'xray/thread_dump_signal_handler'
module Google
  # Google::RPC contains the General RPC module.
  module RPC
    # ClientStub represents an endpoint used to send requests to GRPC servers.
    class ClientStub
      include Core::StatusCodes

      # Default deadline is 5 seconds.
      DEFAULT_DEADLINE = 5

      # Creates a new ClientStub.
      #
      # Minimally, a stub is created with just the host of the gRPC service
      # it wishes to access, e.g.,
      #
      #   my_stub = ClientStub.new(example.host.com:50505)
      #
      # Any arbitrary keyword arguments are treated as channel arguments used
      # to configure the RPC connection to the host.
      #
      # There are some specific keyword args that are not used to configure
      # the channel:
      #
      # - :channel_override
      #  when present, this must be a pre-created GRPC::Channel.  If it's
      #  present the host and arbitrary keyword args are ignored, and the RPC
      #  connection uses this channel.
      #
      # - :deadline
      #  when present, this is the default deadline used for calls
      #
      # - :update_metadata
      #  when present, this is a func that takes a hash and returns a hash;
      #  it can be used to remove, change or amend metadata values.
      #
      # @param host [String] the host the stub connects to
      # @param q [Core::CompletionQueue] used to wait for events
      # @param channel_override [Core::Channel] a pre-created channel
      # @param deadline [Number] the default deadline to use in requests
      # @param creds [Core::Credentials] secures and/or authenticates the
      #   channel
      # @param update_metadata a func that updates metadata as described above
      # @param kw [KeywordArgs] the channel arguments
      def initialize(host, q,
                     channel_override: nil,
                     deadline: DEFAULT_DEADLINE,
                     creds: nil,
                     update_metadata: nil,
                     **kw)
        unless q.is_a? Core::CompletionQueue
          fail(ArgumentError, 'not a CompletionQueue')
        end
        @queue = q

        # set the channel instance
        if !channel_override.nil?
          ch = channel_override
          fail(ArgumentError, 'not a Channel') unless ch.is_a? Core::Channel
        elsif creds.nil?
          ch = Core::Channel.new(host, kw)
        elsif !creds.is_a?(Core::Credentials)
          fail(ArgumentError, 'not a Credentials')
        else
          ch = Core::Channel.new(host, kw, creds)
        end
        @ch = ch

        @update_metadata = nil
        unless update_metadata.nil?
          unless update_metadata.is_a? Proc
            fail(ArgumentError, 'update_metadata is not a Proc')
          end
          @update_metadata = update_metadata
        end

        @host = host
        @deadline = deadline
      end

      # request_response sends a request to a GRPC server, and returns the
      # response.
      #
      # This is a blocking call: it does not return until a response is
      # received, and the request is sent only when GRPC core's flow control
      # allows it.  A RuntimeError is raised if the server responds with a
      # non-OK status or the deadline is exceeded.  Unspecified keyword
      # arguments are treated as metadata to be sent to the server.
      #
      # @param method [String] the RPC method to call on the GRPC server
      # @param req [Object] the request sent to the server
      # @param marshal [Function] f(obj)->string that marshals requests
      # @param unmarshal [Function] f(string)->obj that unmarshals responses
      # @param deadline [Numeric] (optional) the max completion time in
      #   seconds
      # @param return_op [true|false] (default false) return an Operation if
      #   true; calling #execute on the Operation returns the response
      # @return [Object] the response received from the server
      def request_response(method, req, marshal, unmarshal, deadline = nil,
                           return_op: false, **kw)
        c = new_active_call(method, marshal, unmarshal, deadline || @deadline)
        md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone)
        return c.request_response(req, **md) unless return_op

        # return the operation view of the active_call; define #execute as a
        # new method for this instance that invokes #request_response.
        op = c.operation
        op.define_singleton_method(:execute) do
          c.request_response(req, **md)
        end
        op
      end

      # client_streamer sends a stream of requests to a GRPC server, and
      # returns a single response.
      #
      # requests provides an 'iterable' of Requests, i.e. it follows Ruby's
      # #each enumeration protocol; in the simplest case it is an array of
      # marshallable objects.  This is a blocking call: each request is sent
      # only when GRPC core's flow control allows it, and it does not return
      # until a response is received.  A RuntimeError is raised if the server
      # responds with a non-OK status or the deadline is exceeded.
      # Unspecified keyword arguments are treated as metadata to be sent to
      # the server.
      #
      # @param method [String] the RPC method to call on the GRPC server
      # @param requests [Object] an Enumerable of requests to send
      # @param marshal [Function] f(obj)->string that marshals requests
      # @param unmarshal [Function] f(string)->obj that unmarshals responses
      # @param deadline [Numeric] the max completion time in seconds
      # @param return_op [true|false] (default false) return an Operation if
      #   true
      # @return [Object|Operation] the response received from the server
      def client_streamer(method, requests, marshal, unmarshal, deadline = nil,
                          return_op: false, **kw)
        c = new_active_call(method, marshal, unmarshal, deadline || @deadline)
        md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone)
        return c.client_streamer(requests, **md) unless return_op

        # return the operation view of the active_call; define #execute as a
        # new method for this instance that invokes #client_streamer.
        op = c.operation
        op.define_singleton_method(:execute) do
          c.client_streamer(requests, **md)
        end
        op
      end

      # server_streamer sends one request to the GRPC server, which yields a
      # stream of responses.
      #
      # responses provides an enumerator over the streamed responses that
      # blocks while waiting for each one and stops when the server signals
      # that no further responses will be supplied.  If the implicit block is
      # provided, it is executed with each response instead and no result is
      # returned.  A RuntimeError is raised if the server responds with a
      # non-OK status when any response is retrieved, or if the deadline is
      # exceeded.  Unspecified keyword arguments are treated as metadata to
      # be sent to the server.
      #
      # @param method [String] the RPC method to call on the GRPC server
      # @param req [Object] the request sent to the server
      # @param marshal [Function] f(obj)->string that marshals requests
      # @param unmarshal [Function] f(string)->obj that unmarshals responses
      # @param deadline [Numeric] the max completion time in seconds
      # @param return_op [true|false] (default false) return an Operation if
      #   true
      # @param blk [Block] when provided, is executed for each response
      # @return [Enumerator|Operation|nil] as discussed above
      def server_streamer(method, req, marshal, unmarshal, deadline = nil,
                          return_op: false, **kw, &blk)
        c = new_active_call(method, marshal, unmarshal, deadline || @deadline)
        md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone)
        return c.server_streamer(req, **md, &blk) unless return_op

        # return the operation view of the active_call; define #execute
        # as a new method for this instance that invokes #server_streamer
        op = c.operation
        op.define_singleton_method(:execute) do
          c.server_streamer(req, **md, &blk)
        end
        op
      end

      # bidi_streamer sends a stream of requests to the GRPC server, and
      # yields a stream of responses.
      #
      # requests provides an 'iterable' of Requests following Ruby's #each
      # enumeration protocol.  The returned responses object is an enumerator
      # whose #next blocks waiting for the next response; #each/#collect
      # should only be called once the_call#writes_done has been called,
      # otherwise the block will loop forever.  To terminate the RPC
      # correctly the block must call bidi#writes_done and then either return
      # false as soon as there is no need for other responses, or loop on
      # responses#next until no further responses are available.  A
      # RuntimeError is raised if the server responds with a non-OK status
      # when any response is retrieved, or the deadline is exceeded.
      # Unspecified keyword arguments are treated as metadata to be sent to
      # the server.
      #
      # @param method [String] the RPC method to call on the GRPC server
      # @param requests [Object] an Enumerable of requests to send
      # @param marshal [Function] f(obj)->string that marshals requests
      # @param unmarshal [Function] f(string)->obj that unmarshals responses
      # @param deadline [Numeric] (optional) the max completion time in
      #   seconds
      # @param blk [Block] when provided, is executed for each response
      # @param return_op [true|false] (default false) return an Operation if
      #   true
      # @return [Enumerator|nil|Operation] as discussed above
      def bidi_streamer(method, requests, marshal, unmarshal, deadline = nil,
                        return_op: false, **kw, &blk)
        c = new_active_call(method, marshal, unmarshal, deadline || @deadline)
        md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone)
        return c.bidi_streamer(requests, **md, &blk) unless return_op

        # return the operation view of the active_call; define #execute
        # as a new method for this instance that invokes #bidi_streamer
        op = c.operation
        op.define_singleton_method(:execute) do
          c.bidi_streamer(requests, **md, &blk)
        end
        op
      end

      private

      # Creates a new active stub
      #
      # @param ch [GRPC::Channel] the channel used to create the stub.
      # @param marshal [Function] f(obj)->string that marshals requests
      # @param unmarshal [Function] f(string)->obj that unmarshals responses
      # @param deadline [TimeConst] the relative deadline, converted to an
      #   absolute deadline for the call
      def new_active_call(ch, marshal, unmarshal, deadline = nil)
        absolute_deadline = Core::TimeConsts.from_relative_time(deadline)
        call = @ch.create_call(ch, @host, absolute_deadline)
        ActiveCall.new(call, @queue, marshal, unmarshal, absolute_deadline,
                       started: false)
      end
    end
  end
end

@ -29,54 +29,51 @@
require 'grpc/grpc'
module Google::RPC
# RpcDesc is a Descriptor of an RPC method.
class RpcDesc < Struct.new(:name, :input, :output, :marshal_method,
:unmarshal_method)
include Core::StatusCodes
# Used to wrap a message class to indicate that it needs to be streamed.
class Stream
attr_accessor :type
def initialize(type)
@type = type
module Google
module RPC
# RpcDesc is a Descriptor of an RPC method.
class RpcDesc < Struct.new(:name, :input, :output, :marshal_method,
:unmarshal_method)
include Core::StatusCodes
# Used to wrap a message class to indicate that it needs to be streamed.
class Stream
  attr_accessor :type

  # @param type [Class] the message class being wrapped for streaming
  def initialize(type)
    @type = type
  end
end
end
# @return [Proc] { |instance| marshalled(instance) }
# @return [Proc] { |instance| marshalled(instance) }
def marshal_proc
  # Kernel#proc is preferred over Proc.new (rubocop Style/Proc); behavior is
  # identical here.  marshal_method names a class-level method on the
  # message's class that serializes the instance; #to_s forces a String.
  proc { |o| o.class.method(marshal_method).call(o).to_s }
end
# @return [Proc] { |instance| marshalled(instance) }
def marshal_proc
  # marshal_method names a class-level method on the message's class that
  # serializes the instance; #to_s forces a String result.
  proc { |o| o.class.method(marshal_method).call(o).to_s }
end
# @param [:input, :output] target determines whether to produce the an
# unmarshal Proc for the rpc input parameter or
# its output parameter
#
# @return [Proc] An unmarshal proc { |marshalled(instance)| instance }
def unmarshal_proc(target)
  fail ArgumentError unless [:input, :output].include?(target)
  unmarshal_class = method(target).call
  # Stream is only a marker wrapper; unwrap it to the real message class
  unmarshal_class = unmarshal_class.type if unmarshal_class.is_a? Stream
  proc { |o| unmarshal_class.method(unmarshal_method).call(o) }
end
def run_server_method(active_call, mth)
# While a server method is running, it might be cancelled, its deadline
# might be reached, the handler could throw an unknown error, or a
# well-behaved handler could throw a StatusError.
begin
if is_request_response?
def run_server_method(active_call, mth)
# While a server method is running, it might be cancelled, its deadline
# might be reached, the handler could throw an unknown error, or a
# well-behaved handler could throw a StatusError.
if request_response?
req = active_call.remote_read
resp = mth.call(req, active_call.single_req_view)
active_call.remote_send(resp)
elsif is_client_streamer?
elsif client_streamer?
resp = mth.call(active_call.multi_req_view)
active_call.remote_send(resp)
elsif is_server_streamer?
elsif server_streamer?
req = active_call.remote_read
replys = mth.call(req, active_call.single_req_view)
replys.each { |r| active_call.remote_send(r) }
@ -88,7 +85,7 @@ module Google::RPC
rescue BadStatus => e
# this is raised by handlers that want GRPC to send an application
# error code and detail message.
logger.debug("app error: #{active_call}, status:#{e.code}:#{e.details}")
logger.debug("app err: #{active_call}, status:#{e.code}:#{e.details}")
send_status(active_call, e.code, e.details)
rescue Core::CallError => e
# This is raised by GRPC internals but should rarely, if ever happen.
@ -110,50 +107,46 @@ module Google::RPC
logger.warn(e)
send_status(active_call, UNKNOWN, 'no reason given')
end
end
def assert_arity_matches(mth)
if (is_request_response? || is_server_streamer?)
if mth.arity != 2
raise arity_error(mth, 2, "should be #{mth.name}(req, call)")
end
else
if mth.arity != 1
raise arity_error(mth, 1, "should be #{mth.name}(call)")
# Verifies that a handler method has the arity its rpc style requires:
# 2 args (req, call) for unary and server-streaming rpcs, 1 arg (call)
# otherwise. Fails with an arity_error message on mismatch.
def assert_arity_matches(mth)
  if request_response? || server_streamer?
    fail arity_error(mth, 2, "should be #{mth.name}(req, call)") if mth.arity != 2
  else
    fail arity_error(mth, 1, "should be #{mth.name}(call)") if mth.arity != 1
  end
end
end
# true when neither the input nor the output is a Stream, i.e. a plain
# unary request/response rpc.
def is_request_response?
  !input.is_a?(Stream) && !output.is_a?(Stream)
end
# true when neither side is streamed (a unary request/response rpc).
def request_response?
  !(input.is_a?(Stream) || output.is_a?(Stream))
end
# true when only the input is a Stream: the client streams requests and
# receives a single response.
def is_client_streamer?
  input.is_a?(Stream) && !output.is_a?(Stream)
end
# true when the client streams requests but receives a single response.
def client_streamer?
  !output.is_a?(Stream) && input.is_a?(Stream)
end
# true when only the output is a Stream: a single request produces a
# streamed response.
def is_server_streamer?
  !input.is_a?(Stream) && output.is_a?(Stream)
end
# true when a single request produces a streamed response.
def server_streamer?
  output.is_a?(Stream) && !input.is_a?(Stream)
end
# true when both the input and the output are Streams (bidirectional
# streaming rpc).
def is_bidi_streamer?
  input.is_a?(Stream) && output.is_a?(Stream)
end
# true when both directions are streamed (bidirectional rpc).
def bidi_streamer?
  [input, output].all? { |cls| cls.is_a?(Stream) }
end
# Formats the message reported when a handler method has the wrong arity.
def arity_error(mth, want, msg)
  "##{mth.name}: bad arg count; got:#{mth.arity}, want:#{want}, #{msg}"
end
# Produces the diagnostic used when a handler method's arity does not
# match what its rpc style requires.
def arity_error(mth, want, msg)
  format('#%s: bad arg count; got:%s, want:%s, %s',
         mth.name, mth.arity, want, msg)
end
def send_status(active_client, code, details)
begin
# Sends a status back on the active call, substituting a placeholder
# message when no details are given. Failures to send are logged and
# swallowed (the call may already be dead).
def send_status(active_client, code, details)
  details = 'Not sure why' if details.nil?
  active_client.send_status(code, details)
rescue StandardError => e
  # merge residue previously logged this twice (old %-format + new
  # interpolation); keep the single interpolated form
  logger.warn("Could not send status #{code}:#{details}")
  logger.warn(e)
end
end
end
end

@ -33,382 +33,378 @@ require 'grpc/generic/service'
require 'thread'
require 'xray/thread_dump_signal_handler'
module Google::RPC
# RpcServer hosts a number of services and makes them available on the
# network.
class RpcServer
include Core::CompletionType
include Core::TimeConsts
extend ::Forwardable
def_delegators :@server, :add_http2_port
# Default thread pool size is 3
DEFAULT_POOL_SIZE = 3
# Default max_waiting_requests size is 20
DEFAULT_MAX_WAITING_REQUESTS = 20
# Creates a new RpcServer.
#
# The RPC server is configured using keyword arguments.
#
# There are some specific keyword args used to configure the RpcServer
# instance, however other arbitrary are allowed and when present are used
# to configure the listeninng connection set up by the RpcServer.
#
# * server_override: which if passed must be a [GRPC::Core::Server]. When
# present.
#
# * poll_period: when present, the server polls for new events with this
# period
#
# * pool_size: the size of the thread pool the server uses to run its
# threads
#
# * completion_queue_override: when supplied, this will be used as the
# completion_queue that the server uses to receive network events,
# otherwise its creates a new instance itself
#
# * creds: [GRPC::Core::ServerCredentials]
# the credentials used to secure the server
#
# * max_waiting_requests: the maximum number of requests that are not
# being handled to allow. When this limit is exceeded, the server responds
# with not available to new requests
def initialize(pool_size:DEFAULT_POOL_SIZE,
max_waiting_requests:DEFAULT_MAX_WAITING_REQUESTS,
poll_period:INFINITE_FUTURE,
completion_queue_override:nil,
creds:nil,
server_override:nil,
**kw)
if !completion_queue_override.nil?
cq = completion_queue_override
if !cq.is_a?(Core::CompletionQueue)
raise ArgumentError.new('not a CompletionQueue')
module Google
# Google::RPC contains the General RPC module.
module RPC
# RpcServer hosts a number of services and makes them available on the
# network.
class RpcServer
include Core::CompletionType
include Core::TimeConsts
extend ::Forwardable
def_delegators :@server, :add_http2_port
# Default thread pool size is 3
DEFAULT_POOL_SIZE = 3
# Default max_waiting_requests size is 20
DEFAULT_MAX_WAITING_REQUESTS = 20
# Creates a new RpcServer.
#
# The RPC server is configured using keyword arguments.
#
# There are some specific keyword args used to configure the RpcServer
# instance, however other arbitrary are allowed and when present are used
# to configure the listeninng connection set up by the RpcServer.
#
# * server_override: which if passed must be a [GRPC::Core::Server]. When
# present.
#
# * poll_period: when present, the server polls for new events with this
# period
#
# * pool_size: the size of the thread pool the server uses to run its
# threads
#
# * completion_queue_override: when supplied, this will be used as the
# completion_queue that the server uses to receive network events,
# otherwise its creates a new instance itself
#
# * creds: [GRPC::Core::ServerCredentials]
# the credentials used to secure the server
#
# * max_waiting_requests: the maximum number of requests that are not
# being handled to allow. When this limit is exceeded, the server responds
# with not available to new requests
def initialize(pool_size:DEFAULT_POOL_SIZE,
max_waiting_requests:DEFAULT_MAX_WAITING_REQUESTS,
poll_period:INFINITE_FUTURE,
completion_queue_override:nil,
creds:nil,
server_override:nil,
**kw)
if completion_queue_override.nil?
cq = Core::CompletionQueue.new
else
cq = completion_queue_override
unless cq.is_a? Core::CompletionQueue
fail(ArgumentError, 'not a CompletionQueue')
end
end
else
cq = Core::CompletionQueue.new
end
@cq = cq
if !server_override.nil?
srv = server_override
raise ArgumentError.new('not a Server') unless srv.is_a?(Core::Server)
elsif creds.nil?
srv = Core::Server.new(@cq, kw)
elsif !creds.is_a?(Core::ServerCredentials)
raise ArgumentError.new('not a ServerCredentials')
else
srv = Core::Server.new(@cq, kw, creds)
@cq = cq
if server_override.nil?
if creds.nil?
srv = Core::Server.new(@cq, kw)
elsif !creds.is_a? Core::ServerCredentials
fail(ArgumentError, 'not a ServerCredentials')
else
srv = Core::Server.new(@cq, kw, creds)
end
else
srv = server_override
fail(ArgumentError, 'not a Server') unless srv.is_a? Core::Server
end
@server = srv
@pool_size = pool_size
@max_waiting_requests = max_waiting_requests
@poll_period = poll_period
@run_mutex = Mutex.new
@run_cond = ConditionVariable.new
@pool = Pool.new(@pool_size)
end
@server = srv
@pool_size = pool_size
@max_waiting_requests = max_waiting_requests
@poll_period = poll_period
@run_mutex = Mutex.new
@run_cond = ConditionVariable.new
@pool = Pool.new(@pool_size)
end
# stops a running server
#
# the call has no impact if the server is already stopped, otherwise
# server's current call loop is it's last.
def stop
if @running
# stops a running server
#
# the call has no impact if the server is already stopped; otherwise the
# server's current call loop is its last.
def stop
  if @running
    @stopped = true
    @pool.stop
  end
end
end
# determines if the server is currently running
# (lazily defaults @running to false on first use)
def running?
  @running ||= false
end
# determines if the server is currently running
def running?
  @running = false if @running.nil?
  @running
end
# Is called from other threads to wait for #run to start up the server.
#
# If run has not been called, this returns immediately.
#
# @param timeout [Numeric] number of seconds to wait
# @result [true, false] true if the server is running, false otherwise
def wait_till_running(timeout=0.1)
end_time, sleep_period = Time.now + timeout, (1.0 * timeout)/100
while Time.now < end_time
if !running?
@run_mutex.synchronize { @run_cond.wait(@run_mutex) }
# Is called from other threads to wait for #run to start up the server.
#
# If run has not been called, this returns immediately.
#
# @param timeout [Numeric] number of seconds to wait
# @result [true, false] true if the server is running, false otherwise
def wait_till_running(timeout = 0.1)
  end_time = Time.now + timeout
  sleep_period = (1.0 * timeout) / 100
  while Time.now < end_time
    # block on the run condition until #run signals it has started
    @run_mutex.synchronize { @run_cond.wait(@run_mutex) } unless running?
    sleep(sleep_period)
  end
  # merge residue added an extra sleep(sleep_period) here; removed so the
  # method returns as soon as the deadline passes
  running?
end
return running?
end
# determines if the server is currently stopped
# (lazily defaults @stopped to false on first use)
def stopped?
  @stopped ||= false
end
# handle registration of classes
#
# service is either a class that includes GRPC::GenericService and whose
# #new function can be called without argument or any instance of such a
# class.
#
# E.g, after
#
# class Divider
# include GRPC::GenericService
# rpc :div DivArgs, DivReply # single request, single response
# def initialize(optional_arg='default option') # no args
# ...
# end
#
# srv = GRPC::RpcServer.new(...)
#
# # Either of these works
#
# srv.handle(Divider)
#
# # or
#
# srv.handle(Divider.new('replace optional arg'))
#
# It raises RuntimeError:
# - if service is not valid service class or object
# - if it is a valid service, but the handler methods are already registered
# - if the server is already running
#
# @param service [Object|Class] a service class or object as described
# above
# Registers a service (a class including GenericService, or an instance
# of one) with the server; refuses once the server has run or stopped.
def handle(service)
  raise 'cannot add services if the server is running' if running?
  raise 'cannot add services if the server is stopped' if stopped?
  cls = service.is_a?(Class) ? service : service.class
  assert_valid_service_class(cls)
  add_rpc_descs_for(service)
end
# runs the server
#
# - if no rpc_descs are registered, this exits immediately, otherwise it
# continues running permanently and does not return until program exit.
#
# - #running? returns true after this is called, until #stop cause the
# the server to stop.
def run
if rpc_descs.size == 0
logger.warn('did not run as no services were present')
return
# determines if the server is currently stopped
def stopped?
  @stopped = false if @stopped.nil?
  @stopped
end
@run_mutex.synchronize do
@running = true
@run_cond.signal
# handle registration of classes
#
# service is either a class that includes GRPC::GenericService and whose
# #new function can be called without argument or any instance of such a
# class.
#
# E.g, after
#
# class Divider
# include GRPC::GenericService
# rpc :div DivArgs, DivReply # single request, single response
# def initialize(optional_arg='default option') # no args
# ...
# end
#
# srv = GRPC::RpcServer.new(...)
#
# # Either of these works
#
# srv.handle(Divider)
#
# # or
#
# srv.handle(Divider.new('replace optional arg'))
#
# It raises RuntimeError:
# - if service is not valid service class or object
# - its handler methods are already registered
# - if the server is already running
#
# @param service [Object|Class] a service class or object as described
# above
# Registers the rpc handlers provided by service, which may be a class
# or an instance; registration is refused once the server has started
# or stopped.
def handle(service)
  fail 'cannot add services if the server is running' if running?
  fail 'cannot add services if the server is stopped' if stopped?
  svc_class = service.is_a?(Class) ? service : service.class
  assert_valid_service_class(svc_class)
  add_rpc_descs_for(service)
end
@pool.start
@server.start
server_tag = Object.new
while !stopped?
@server.request_call(server_tag)
ev = @cq.pluck(server_tag, @poll_period)
next if ev.nil?
if ev.type != SERVER_RPC_NEW
logger.warn("bad evt: got:#{ev.type}, want:#{SERVER_RPC_NEW}")
ev.close
next
# runs the server
#
# - if no rpc_descs are registered, this exits immediately, otherwise it
# continues running permanently and does not return until program exit.
#
# - #running? returns true after this is called, until #stop cause the
# the server to stop.
def run
if rpc_descs.size == 0
logger.warn('did not run as no services were present')
return
end
c = new_active_server_call(ev.call, ev.result)
if !c.nil?
mth = ev.result.method.to_sym
ev.close
@pool.schedule(c) do |call|
rpc_descs[mth].run_server_method(call, rpc_handlers[mth])
@run_mutex.synchronize do
@running = true
@run_cond.signal
end
@pool.start
@server.start
server_tag = Object.new
until stopped?
@server.request_call(server_tag)
ev = @cq.pluck(server_tag, @poll_period)
next if ev.nil?
if ev.type != SERVER_RPC_NEW
logger.warn("bad evt: got:#{ev.type}, want:#{SERVER_RPC_NEW}")
ev.close
next
end
c = new_active_server_call(ev.call, ev.result)
unless c.nil?
mth = ev.result.method.to_sym
ev.close
@pool.schedule(c) do |call|
rpc_descs[mth].run_server_method(call, rpc_handlers[mth])
end
end
end
end
@running = false
end
def new_active_server_call(call, new_server_rpc)
# TODO(temiola): perhaps reuse the main server completion queue here, but
# for now, create a new completion queue per call, pending best practice
# usage advice from the c core.
# Accept the call. This is necessary even if a status is to be sent back
# immediately
finished_tag = Object.new
call_queue = Core::CompletionQueue.new
call.metadata = new_server_rpc.metadata # store the metadata on the call
call.server_accept(call_queue, finished_tag)
call.server_end_initial_metadata()
# Send UNAVAILABLE if there are too many unprocessed jobs
jobs_count, max = @pool.jobs_waiting, @max_waiting_requests
logger.info("waiting: #{jobs_count}, max: #{max}")
if @pool.jobs_waiting > @max_waiting_requests
logger.warn("NOT AVAILABLE: too many jobs_waiting: #{new_server_rpc}")
noop = Proc.new { |x| x }
c = ActiveCall.new(call, call_queue, noop, noop,
new_server_rpc.deadline, finished_tag: finished_tag)
c.send_status(StatusCodes::UNAVAILABLE, '')
return nil
@running = false
end
# Send NOT_FOUND if the method does not exist
mth = new_server_rpc.method.to_sym
if !rpc_descs.has_key?(mth)
logger.warn("NOT_FOUND: #{new_server_rpc}")
noop = Proc.new { |x| x }
c = ActiveCall.new(call, call_queue, noop, noop,
new_server_rpc.deadline, finished_tag: finished_tag)
c.send_status(StatusCodes::NOT_FOUND, '')
return nil
end
def new_active_server_call(call, new_server_rpc)
# TODO(temiola): perhaps reuse the main server completion queue here,
# but for now, create a new completion queue per call, pending best
# practice usage advice from the c core.
# Accept the call. This is necessary even if a status is to be sent
# back immediately
finished_tag = Object.new
call_queue = Core::CompletionQueue.new
call.metadata = new_server_rpc.metadata # store the metadata
call.server_accept(call_queue, finished_tag)
call.server_end_initial_metadata
# Send UNAVAILABLE if there are too many unprocessed jobs
jobs_count, max = @pool.jobs_waiting, @max_waiting_requests
logger.info("waiting: #{jobs_count}, max: #{max}")
if @pool.jobs_waiting > @max_waiting_requests
logger.warn("NOT AVAILABLE: too many jobs_waiting: #{new_server_rpc}")
noop = proc { |x| x }
c = ActiveCall.new(call, call_queue, noop, noop,
new_server_rpc.deadline,
finished_tag: finished_tag)
c.send_status(StatusCodes::UNAVAILABLE, '')
return nil
end
# Create the ActiveCall
rpc_desc = rpc_descs[mth]
logger.info("deadline is #{new_server_rpc.deadline}; (now=#{Time.now})")
ActiveCall.new(call, call_queue,
rpc_desc.marshal_proc, rpc_desc.unmarshal_proc(:input),
new_server_rpc.deadline, finished_tag: finished_tag)
end
# Send NOT_FOUND if the method does not exist
mth = new_server_rpc.method.to_sym
unless rpc_descs.key?(mth)
logger.warn("NOT_FOUND: #{new_server_rpc}")
noop = proc { |x| x }
c = ActiveCall.new(call, call_queue, noop, noop,
new_server_rpc.deadline,
finished_tag: finished_tag)
c.send_status(StatusCodes::NOT_FOUND, '')
return nil
end
# Pool is a simple thread pool for running server requests.
class Pool
def initialize(size)
raise 'pool size must be positive' unless size > 0
@jobs = Queue.new
@size = size
@stopped = false
@stop_mutex = Mutex.new
@stop_cond = ConditionVariable.new
@workers = []
# Create the ActiveCall
rpc_desc = rpc_descs[mth]
logger.info("deadline is #{new_server_rpc.deadline}; (now=#{Time.now})")
ActiveCall.new(call, call_queue,
rpc_desc.marshal_proc, rpc_desc.unmarshal_proc(:input),
new_server_rpc.deadline, finished_tag: finished_tag)
end
# Returns the number of jobs waiting
# @return [Integer] the current size of the job queue
def jobs_waiting
  @jobs.size
end
# Pool is a simple thread pool for running server requests.
class Pool
# Sets up a pool that will run jobs on `size` worker threads.
#
# @param size [Integer] number of worker threads; must be positive
def initialize(size)
  fail 'pool size must be positive' unless size > 0
  @size = size
  @jobs = Queue.new
  @workers = []
  @stopped = false
  @stop_mutex = Mutex.new
  @stop_cond = ConditionVariable.new
end
# Runs the given block on the queue with the provided args.
#
# @param args the args passed blk when it is called
# @param blk the block to call
def schedule(*args, &blk)
  # scheduling on a stopped pool is a programming error
  raise 'already stopped' if @stopped
  # calls without a block are silently ignored
  return if blk.nil?
  logger.info('schedule another job')
  @jobs << [blk, args]
end
# Returns the number of jobs waiting
# @return [Integer] jobs queued but not yet claimed by a worker
def jobs_waiting
  @jobs.length
end
# Runs the given block on the queue with the provided args.
#
# @param args the args passed blk when it is called
# @param blk the block to call
# Queues blk to run on a pool worker with the provided args.
#
# @param args the args passed to blk when it runs
# @param blk the block to call; calls without a block are ignored
def schedule(*args, &blk)
  fail 'already stopped' if @stopped
  return if blk.nil?
  logger.info('schedule another job')
  @jobs.push([blk, args])
end
# Starts running the jobs in the thread pool.
def start
raise 'already stopped' if @stopped
until @workers.size == @size.to_i
next_thread = Thread.new do
catch(:exit) do # allows { throw :exit } to kill a thread
loop do
begin
blk, args = @jobs.pop
blk.call(*args)
rescue StandardError => e
logger.warn('Error in worker thread')
logger.warn(e)
# Starts running the jobs in the thread pool.
def start
fail 'already stopped' if @stopped
until @workers.size == @size.to_i
next_thread = Thread.new do
catch(:exit) do # allows { throw :exit } to kill a thread
loop do
begin
blk, args = @jobs.pop
blk.call(*args)
rescue StandardError => e
logger.warn('Error in worker thread')
logger.warn(e)
end
end
end
end
# removes the threads from workers, and signal when all the threads
# are complete.
@stop_mutex.synchronize do
@workers.delete(Thread.current)
if @workers.size == 0
@stop_cond.signal
# removes the threads from workers, and signal when all the
# threads are complete.
@stop_mutex.synchronize do
@workers.delete(Thread.current)
@stop_cond.signal if @workers.size == 0
end
end
@workers << next_thread
end
@workers << next_thread
end
end
# Stops the jobs in the pool
def stop
logger.info('stopping, will wait for all the workers to exit')
@workers.size.times { schedule { throw :exit } }
@stopped = true
# Stops the jobs in the pool
def stop
logger.info('stopping, will wait for all the workers to exit')
@workers.size.times { schedule { throw :exit } }
@stopped = true
# TODO(temiola): allow configuration of the keepalive period
keep_alive = 5
@stop_mutex.synchronize do
if @workers.size > 0
@stop_cond.wait(@stop_mutex, keep_alive)
# TODO(temiola): allow configuration of the keepalive period
keep_alive = 5
@stop_mutex.synchronize do
@stop_cond.wait(@stop_mutex, keep_alive) if @workers.size > 0
end
end
# Forcibly shutdown any threads that are still alive.
if @workers.size > 0
logger.warn("forcibly terminating #{@workers.size} worker(s)")
@workers.each do |t|
next unless t.alive?
begin
t.exit
rescue StandardError => e
logger.warn('error while terminating a worker')
logger.warn(e)
# Forcibly shutdown any threads that are still alive.
if @workers.size > 0
logger.warn("forcibly terminating #{@workers.size} worker(s)")
@workers.each do |t|
next unless t.alive?
begin
t.exit
rescue StandardError => e
logger.warn('error while terminating a worker')
logger.warn(e)
end
end
end
end
logger.info('stopped, all workers are shutdown')
logger.info('stopped, all workers are shutdown')
end
end
end
protected
protected
def rpc_descs
@rpc_descs ||= {}
end
def rpc_descs
@rpc_descs ||= {}
end
def rpc_handlers
@rpc_handlers ||= {}
end
def rpc_handlers
@rpc_handlers ||= {}
end
private
private
def assert_valid_service_class(cls)
if !cls.include?(GenericService)
raise "#{cls} should 'include GenericService'"
end
if cls.rpc_descs.size == 0
raise "#{cls} should specify some rpc descriptions"
# Validates that cls can be served: it must include GenericService,
# declare at least one rpc description, and implement a handler method
# for each declared rpc.
def assert_valid_service_class(cls)
  fail "#{cls} should 'include GenericService'" unless cls.include?(GenericService)
  fail "#{cls} should specify some rpc descriptions" if cls.rpc_descs.size == 0
  cls.assert_rpc_descs_have_methods
end
cls.assert_rpc_descs_have_methods
end
def add_rpc_descs_for(service)
cls = service.is_a?(Class) ? service : service.class
specs = rpc_descs
handlers = rpc_handlers
cls.rpc_descs.each_pair do |name,spec|
route = "/#{cls.service_name}/#{name}".to_sym
if specs.has_key?(route)
raise "Cannot add rpc #{route} from #{spec}, already registered"
else
specs[route] = spec
if service.is_a?(Class)
handlers[route] = cls.new.method(name.to_s.underscore.to_sym)
# Records the rpc descriptions and handler methods exposed by service.
#
# service may be a class (handlers are bound to a fresh instance) or an
# instance (handlers are bound to it directly). Fails if a route is
# already registered.
def add_rpc_descs_for(service)
  cls = service.is_a?(Class) ? service : service.class
  specs = rpc_descs
  handlers = rpc_handlers
  cls.rpc_descs.each_pair do |name, spec|
    route = "/#{cls.service_name}/#{name}".to_sym
    if specs.key? route
      fail "Cannot add rpc #{route} from #{spec}, already registered"
    else
      specs[route] = spec
      # merge residue previously bound service.method(...) before this
      # branch, which raises NameError when service is a Class; bind once
      # to either a fresh instance or the provided instance
      if service.is_a?(Class)
        handlers[route] = cls.new.method(name.to_s.underscore.to_sym)
      else
        handlers[route] = service.method(name.to_s.underscore.to_sym)
      end
      logger.info("handling #{route} with #{handlers[route]}")
    end
  end
end
end
end
end

@ -32,7 +32,6 @@ require 'grpc/generic/rpc_desc'
# Extend String to add a method underscore
class String
  # creates a new string that is the underscore separated version of
  # this one.
  #
  # E.g,
  # AMethod -> a_method
  # AnRpc -> an_rpc
  def underscore
    # merge residue left both `word = self.dup` and `word = dup`; one
    # copy is enough (the receiver itself is never mutated)
    word = dup
    word.gsub!(/([A-Z]+)([A-Z][a-z])/, '\1_\2')
    word.gsub!(/([a-z\d])([A-Z])/, '\1_\2')
    word.tr!('-', '_')
    word.downcase!
    word
  end
end
module Google::RPC
# Provides behaviour used to implement schema-derived service classes.
#
# Is intended to be used to support both client and server IDL-schema-derived
# servers.
module GenericService
# Used to indicate that a name has already been specified
class DuplicateRpcName < StandardError
  # @param name the rpc name that was registered a second time
  def initialize(name)
    super("rpc (#{name}) is already defined")
  end
end
# Provides a simple DSL to describe RPC services.
#
# E.g, a Maths service that uses the serializable messages DivArgs,
# DivReply and Num might define its endpoint uses the following way:
#
# rpc :div DivArgs, DivReply # single request, single response
# rpc :sum stream(Num), Num # streamed input, single response
# rpc :fib FibArgs, stream(Num) # single request, streamed response
# rpc :div_many stream(DivArgs), stream(DivReply)
# # streamed req and resp
module Google
# Google::RPC contains the General RPC module.
module RPC
# Provides behaviour used to implement schema-derived service classes.
#
# Each 'rpc' adds an RpcDesc to classes including this module, and
# #assert_rpc_descs_have_methods is used to ensure the including class
# provides methods with signatures that support all the descriptors.
module Dsl
# This configures the method names that the serializable message
# implementation uses to marshal and unmarshal messages.
#
# - unmarshal_class method must be a class method on the serializable
# message type that takes a string (byte stream) and produces and object
#
# - marshal_class_method is called on a serializable message instance
# and produces a serialized string.
#
# The Dsl verifies that the types in the descriptor have both the
# unmarshal and marshal methods.
attr_writer(:marshal_class_method, :unmarshal_class_method)
# This allows configuration of the service name.
attr_accessor(:service_name)
# Is intended to be used to support both client and server
# IDL-schema-derived servers.
module GenericService
# Used to indicate that a name has already been specified
class DuplicateRpcName < StandardError
  # @param name the rpc name that was registered a second time
  def initialize(name)
    message = format('rpc (%s) is already defined', name)
    super(message)
  end
end
# Adds an RPC spec.
# Provides a simple DSL to describe RPC services.
#
# Takes the RPC name and the classes representing the types to be
# serialized, and adds them to the including classes rpc_desc hash.
# E.g, a Maths service that uses the serializable messages DivArgs,
# DivReply and Num might define its endpoint uses the following way:
#
# input and output should both have the methods #marshal and #unmarshal
# that are responsible for writing and reading an object instance from a
# byte buffer respectively.
# rpc :div DivArgs, DivReply # single request, single response
# rpc :sum stream(Num), Num # streamed input, single response
# rpc :fib FibArgs, stream(Num) # single request, streamed response
# rpc :div_many stream(DivArgs), stream(DivReply)
# # streamed req and resp
#
# @param name [String] the name of the rpc
# @param input [Object] the input parameter's class
# @param output [Object] the output parameter's class
# Adds an RPC spec named `name` for the given input/output classes,
# verifying both can be marshalled; raises DuplicateRpcName when the
# name is already registered.
def rpc(name, input, output)
  raise DuplicateRpcName, name if rpc_descs.has_key?(name)
  assert_can_marshal(input)
  assert_can_marshal(output)
  rpc_descs[name] = RpcDesc.new(name, input, output,
                                marshal_class_method,
                                unmarshal_class_method)
end
# Hook: copies this service's rpc descriptors and service name onto
# subclasses.
def inherited(subclass)
  # Each subclass should have a distinct class variable with its own
  # rpc_descs
  subclass.rpc_descs.merge!(rpc_descs)
  subclass.service_name = service_name
end
# the name of the instance method used to marshal events to a byte stream.
# Defaults to :marshal unless configured via marshal_class_method=.
def marshal_class_method
  @marshal_class_method ||= :marshal
end
# Each 'rpc' adds an RpcDesc to classes including this module, and
# #assert_rpc_descs_have_methods is used to ensure the including class
# provides methods with signatures that support all the descriptors.
module Dsl
# This configures the method names that the serializable message
# implementation uses to marshal and unmarshal messages.
#
# - unmarshal_class method must be a class method on the serializable
# message type that takes a string (byte stream) and produces and object
#
# - marshal_class_method is called on a serializable message instance
# and produces a serialized string.
#
# The Dsl verifies that the types in the descriptor have both the
# unmarshal and marshal methods.
attr_writer(:marshal_class_method, :unmarshal_class_method)
# This allows configuration of the service name.
attr_accessor(:service_name)
# Adds an RPC spec.
#
# Takes the RPC name and the classes representing the types to be
# serialized, and adds them to the including classes rpc_desc hash.
#
# input and output should both have the methods #marshal and #unmarshal
# that are responsible for writing and reading an object instance from a
# byte buffer respectively.
#
# @param name [String] the name of the rpc
# @param input [Object] the input parameter's class
# @param output [Object] the output parameter's class
# Adds an RPC spec named `name` for the given input/output classes.
#
# Both classes must support the configured marshal/unmarshal methods.
# @raise [DuplicateRpcName] when name is already registered
def rpc(name, input, output)
  fail(DuplicateRpcName, name) if rpc_descs.key? name
  [input, output].each { |cls| assert_can_marshal(cls) }
  rpc_descs[name] = RpcDesc.new(name, input, output,
                                marshal_class_method,
                                unmarshal_class_method)
end
# the name of the class method used to unmarshal from a byte stream.
# Defaults to :unmarshal unless configured via unmarshal_class_method=.
def unmarshal_class_method
  @unmarshal_class_method ||= :unmarshal
end
# Hook: gives each subclass its own descriptor registry (seeded from
# the parent's) and the parent's service name.
def inherited(child)
  child.service_name = service_name
  child.rpc_descs.merge!(rpc_descs)
end
def assert_can_marshal(cls)
if cls.is_a?RpcDesc::Stream
cls = cls.type
# the name of the instance method used to marshal events to a byte
# stream. Defaults to :marshal unless configured.
def marshal_class_method
  @marshal_class_method = :marshal if @marshal_class_method.nil?
  @marshal_class_method
end
mth = unmarshal_class_method
if !cls.methods.include?(mth)
raise ArgumentError, "#{cls} needs #{cls}.#{mth}"
# the name of the class method used to unmarshal from a byte stream.
# Defaults to :unmarshal unless configured.
def unmarshal_class_method
  @unmarshal_class_method = :unmarshal if @unmarshal_class_method.nil?
  @unmarshal_class_method
end
mth = marshal_class_method
if !cls.methods.include?(mth)
raise ArgumentError, "#{cls} needs #{cls}.#{mth}"
# Verifies that cls (or the class wrapped by a Stream) responds to both
# the configured unmarshal and marshal class methods.
#
# @raise [ArgumentError] naming the first missing method
def assert_can_marshal(cls)
  cls = cls.type if cls.is_a? RpcDesc::Stream
  # check unmarshal first, then marshal, matching the reported order
  [unmarshal_class_method, marshal_class_method].each do |mth|
    unless cls.methods.include? mth
      fail(ArgumentError, "#{cls} needs #{cls}.#{mth}")
    end
  end
end
end
# Wraps cls to mark it as a streamed rpc parameter.
# @param cls [Class] the class of a serializable type
# @return cls wrapped in a RpcDesc::Stream
def stream(cls)
  assert_can_marshal(cls)
  RpcDesc::Stream.new(cls)
end
# Wraps cls to mark it as a streamed rpc parameter; verifies it is
# marshallable first.
# @param cls [Class] the class of a serializable type
# @return cls wrapped in a RpcDesc::Stream
def stream(cls)
  assert_can_marshal(cls)
  RpcDesc::Stream.new(cls)
end
# the RpcDescs defined for this GenericService, keyed by name.
# Lazily initialized to an empty Hash.
def rpc_descs
  @rpc_descs ||= {}
end
# the RpcDescs defined for this GenericService, keyed by name.
def rpc_descs
  @rpc_descs = {} if @rpc_descs.nil?
  @rpc_descs
end
# Creates a rpc client class with methods for accessing the methods
# currently in rpc_descs.
def rpc_stub_class
descs = rpc_descs
route_prefix = service_name
Class.new(ClientStub) do
# @param host [String] the host the stub connects to
# @param kw [KeywordArgs] the channel arguments, plus any optional
# args for configuring the client's channel
def initialize(host, **kw)
super(host, Core::CompletionQueue.new, **kw)
end
# Creates a rpc client class with methods for accessing the methods
# currently in rpc_descs.
def rpc_stub_class
descs = rpc_descs
route_prefix = service_name
Class.new(ClientStub) do
# @param host [String] the host the stub connects to
# @param kw [KeywordArgs] the channel arguments, plus any optional
# args for configuring the client's channel
def initialize(host, **kw)
super(host, Core::CompletionQueue.new, **kw)
end
# Used define_method to add a method for each rpc_desc. Each method
# calls the base class method for the given descriptor.
descs.each_pair do |name,desc|
mth_name = name.to_s.underscore.to_sym
marshal = desc.marshal_proc
unmarshal = desc.unmarshal_proc(:output)
route = "/#{route_prefix}/#{name}"
if desc.is_request_response?
define_method(mth_name) do |req,deadline=nil|
logger.debug("calling #{@host}:#{route}")
request_response(route, req, marshal, unmarshal, deadline)
end
elsif desc.is_client_streamer?
define_method(mth_name) do |reqs,deadline=nil|
logger.debug("calling #{@host}:#{route}")
client_streamer(route, reqs, marshal, unmarshal, deadline)
end
elsif desc.is_server_streamer?
define_method(mth_name) do |req,deadline=nil,&blk|
logger.debug("calling #{@host}:#{route}")
server_streamer(route, req, marshal, unmarshal, deadline, &blk)
end
else # is a bidi_stream
define_method(mth_name) do |reqs, deadline=nil,&blk|
logger.debug("calling #{@host}:#{route}")
bidi_streamer(route, reqs, marshal, unmarshal, deadline, &blk)
# Used define_method to add a method for each rpc_desc. Each method
# calls the base class method for the given descriptor.
descs.each_pair do |name, desc|
mth_name = name.to_s.underscore.to_sym
marshal = desc.marshal_proc
unmarshal = desc.unmarshal_proc(:output)
route = "/#{route_prefix}/#{name}"
if desc.request_response?
define_method(mth_name) do |req, deadline = nil|
logger.debug("calling #{@host}:#{route}")
request_response(route, req, marshal, unmarshal, deadline)
end
elsif desc.client_streamer?
define_method(mth_name) do |reqs, deadline = nil|
logger.debug("calling #{@host}:#{route}")
client_streamer(route, reqs, marshal, unmarshal, deadline)
end
elsif desc.server_streamer?
define_method(mth_name) do |req, deadline = nil, &blk|
logger.debug("calling #{@host}:#{route}")
server_streamer(route, req, marshal, unmarshal, deadline,
&blk)
end
else # is a bidi_stream
define_method(mth_name) do |reqs, deadline = nil, &blk|
logger.debug("calling #{@host}:#{route}")
bidi_streamer(route, reqs, marshal, unmarshal, deadline, &blk)
end
end
end
end
end
end
# Asserts that the appropriate methods are defined for each added rpc
# spec. Is intended to aid verifying that server classes are correctly
# implemented.
def assert_rpc_descs_have_methods
rpc_descs.each_pair do |m,spec|
mth_name = m.to_s.underscore.to_sym
if !self.instance_methods.include?(mth_name)
raise "#{self} does not provide instance method '#{mth_name}'"
# Asserts that the appropriate methods are defined for each added rpc
# spec. Is intended to aid verifying that server classes are correctly
# implemented.
def assert_rpc_descs_have_methods
rpc_descs.each_pair do |m, spec|
mth_name = m.to_s.underscore.to_sym
unless instance_methods.include?(mth_name)
fail "#{self} does not provide instance method '#{mth_name}'"
end
spec.assert_arity_matches(instance_method(mth_name))
end
spec.assert_arity_matches(self.instance_method(mth_name))
end
end
end
def self.included(o)
o.extend(Dsl)
# Update to the use the service name including module. Proivde a default
# that can be nil e,g. when modules are declared dynamically.
return unless o.service_name.nil?
if o.name.nil?
o.service_name = 'GenericService'
else
modules = o.name.split('::')
if modules.length > 2
o.service_name = modules[modules.length - 2]
def self.included(o)
o.extend(Dsl)
# Update to the use the service name including module. Proivde a default
# that can be nil e,g. when modules are declared dynamically.
return unless o.service_name.nil?
if o.name.nil?
o.service_name = 'GenericService'
else
o.service_name = modules.first
modules = o.name.split('::')
if modules.length > 2
o.service_name = modules[modules.length - 2]
else
o.service_name = modules.first
end
end
end
end
end
end

@ -28,6 +28,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
module Google
# Google::RPC contains the General RPC module.
module RPC
VERSION = '0.0.1'
end

@ -30,7 +30,6 @@
require 'grpc'
describe 'Wrapped classes where .new cannot create an instance' do
describe GRPC::Core::Event do
it 'should fail .new fail with a runtime error' do
expect { GRPC::Core::Event.new }.to raise_error(TypeError)
@ -42,5 +41,4 @@ describe 'Wrapped classes where .new cannot create an instance' do
expect { GRPC::Core::Event.new }.to raise_error(TypeError)
end
end
end

@ -30,9 +30,7 @@
require 'grpc'
describe GRPC::Core::ByteBuffer do
describe '#new' do
it 'is constructed from a string' do
expect { GRPC::Core::ByteBuffer.new('#new') }.not_to raise_error
end
@ -50,7 +48,6 @@ describe GRPC::Core::ByteBuffer do
expect { GRPC::Core::ByteBuffer.new(x) }.to raise_error TypeError
end
end
end
describe '#to_s' do
@ -67,5 +64,4 @@ describe GRPC::Core::ByteBuffer do
expect(a_copy.dup.to_s).to eq('#dup')
end
end
end

@ -33,30 +33,29 @@ require 'port_picker'
include GRPC::Core::StatusCodes
describe GRPC::Core::RpcErrors do
before(:each) do
@known_types = {
:OK => 0,
:ERROR => 1,
:NOT_ON_SERVER => 2,
:NOT_ON_CLIENT => 3,
:ALREADY_ACCEPTED => 4,
:ALREADY_INVOKED => 5,
:NOT_INVOKED => 6,
:ALREADY_FINISHED => 7,
:TOO_MANY_OPERATIONS => 8,
:INVALID_FLAGS => 9,
:ErrorMessages => {
0=>'ok',
1=>'unknown error',
2=>'not available on a server',
3=>'not available on a client',
4=>'call is already accepted',
5=>'call is already invoked',
6=>'call is not yet invoked',
7=>'call is already finished',
8=>'outstanding read or write present',
9=>'a bad flag was given',
OK: 0,
ERROR: 1,
NOT_ON_SERVER: 2,
NOT_ON_CLIENT: 3,
ALREADY_ACCEPTED: 4,
ALREADY_INVOKED: 5,
NOT_INVOKED: 6,
ALREADY_FINISHED: 7,
TOO_MANY_OPERATIONS: 8,
INVALID_FLAGS: 9,
ErrorMessages: {
0 => 'ok',
1 => 'unknown error',
2 => 'not available on a server',
3 => 'not available on a client',
4 => 'call is already accepted',
5 => 'call is already invoked',
6 => 'call is not yet invoked',
7 => 'call is already finished',
8 => 'outstanding read or write present',
9 => 'a bad flag was given'
}
}
end
@ -66,11 +65,9 @@ describe GRPC::Core::RpcErrors do
syms_and_codes = m.constants.collect { |c| [c, m.const_get(c)] }
expect(Hash[syms_and_codes]).to eq(@known_types)
end
end
describe GRPC::Core::Call do
before(:each) do
@tag = Object.new
@client_queue = GRPC::Core::CompletionQueue.new
@ -88,7 +85,7 @@ describe GRPC::Core::Call do
describe '#start_read' do
it 'should fail if called immediately' do
blk = Proc.new { make_test_call.start_read(@tag) }
blk = proc { make_test_call.start_read(@tag) }
expect(&blk).to raise_error GRPC::Core::CallError
end
end
@ -96,21 +93,21 @@ describe GRPC::Core::Call do
describe '#start_write' do
it 'should fail if called immediately' do
bytes = GRPC::Core::ByteBuffer.new('test string')
blk = Proc.new { make_test_call.start_write(bytes, @tag) }
blk = proc { make_test_call.start_write(bytes, @tag) }
expect(&blk).to raise_error GRPC::Core::CallError
end
end
describe '#start_write_status' do
it 'should fail if called immediately' do
blk = Proc.new { make_test_call.start_write_status(153, 'x', @tag) }
blk = proc { make_test_call.start_write_status(153, 'x', @tag) }
expect(&blk).to raise_error GRPC::Core::CallError
end
end
describe '#writes_done' do
it 'should fail if called immediately' do
blk = Proc.new { make_test_call.writes_done(Object.new) }
blk = proc { make_test_call.writes_done(Object.new) }
expect(&blk).to raise_error GRPC::Core::CallError
end
end
@ -119,7 +116,8 @@ describe GRPC::Core::Call do
it 'adds metadata to a call without fail' do
call = make_test_call
n = 37
metadata = Hash[n.times.collect { |i| ["key%d" % i, "value%d" %i] } ]
one_md = proc { |x| [sprintf('key%d', x), sprintf('value%d', x)] }
metadata = Hash[n.times.collect { |i| one_md.call i }]
expect { call.add_metadata(metadata) }.to_not raise_error
end
end
@ -174,7 +172,7 @@ describe GRPC::Core::Call do
describe '#metadata' do
it 'can save the metadata hash and read it back' do
call = make_test_call
md = {'k1' => 'v1', 'k2' => 'v2'}
md = { 'k1' => 'v1', 'k2' => 'v2' }
expect { call.metadata = md }.not_to raise_error
expect(call.metadata).to be(md)
end
@ -191,7 +189,6 @@ describe GRPC::Core::Call do
end
end
def make_test_call
@ch.create_call('dummy_method', 'dummy_host', deadline)
end
@ -199,5 +196,4 @@ describe GRPC::Core::Call do
def deadline
Time.now + 2 # in 2 seconds; arbitrary
end
end

@ -37,8 +37,6 @@ def load_test_certs
end
describe GRPC::Core::Channel do
def create_test_cert
GRPC::Core::Credentials.new(load_test_certs[0])
end
@ -48,7 +46,6 @@ describe GRPC::Core::Channel do
end
shared_examples '#new' do
it 'take a host name without channel args' do
expect { GRPC::Core::Channel.new('dummy_host', nil) }.not_to raise_error
end
@ -61,14 +58,14 @@ describe GRPC::Core::Channel do
end
it 'does not take a hash with bad values as channel args' do
blk = construct_with_args(:symbol => Object.new)
blk = construct_with_args(symbol: Object.new)
expect(&blk).to raise_error TypeError
blk = construct_with_args('1' => Hash.new)
expect(&blk).to raise_error TypeError
end
it 'can take a hash with a symbol key as channel args' do
blk = construct_with_args(:a_symbol => 1)
blk = construct_with_args(a_symbol: 1)
expect(&blk).to_not raise_error
end
@ -78,32 +75,30 @@ describe GRPC::Core::Channel do
end
it 'can take a hash with a string value as channel args' do
blk = construct_with_args(:a_symbol => '1')
blk = construct_with_args(a_symbol: '1')
expect(&blk).to_not raise_error
end
it 'can take a hash with a symbol value as channel args' do
blk = construct_with_args(:a_symbol => :another_symbol)
blk = construct_with_args(a_symbol: :another_symbol)
expect(&blk).to_not raise_error
end
it 'can take a hash with a numeric value as channel args' do
blk = construct_with_args(:a_symbol => 1)
blk = construct_with_args(a_symbol: 1)
expect(&blk).to_not raise_error
end
it 'can take a hash with many args as channel args' do
args = Hash[127.times.collect { |x| [x.to_s, x] } ]
args = Hash[127.times.collect { |x| [x.to_s, x] }]
blk = construct_with_args(args)
expect(&blk).to_not raise_error
end
end
describe '#new for secure channels' do
def construct_with_args(a)
Proc.new { GRPC::Core::Channel.new('dummy_host', a, create_test_cert) }
proc { GRPC::Core::Channel.new('dummy_host', a, create_test_cert) }
end
it_behaves_like '#new'
@ -113,7 +108,7 @@ describe GRPC::Core::Channel do
it_behaves_like '#new'
def construct_with_args(a)
Proc.new { GRPC::Core::Channel.new('dummy_host', a) }
proc { GRPC::Core::Channel.new('dummy_host', a) }
end
end
@ -125,7 +120,7 @@ describe GRPC::Core::Channel do
deadline = Time.now + 5
blk = Proc.new do
blk = proc do
ch.create_call('dummy_method', 'dummy_host', deadline)
end
expect(&blk).to_not raise_error
@ -138,12 +133,11 @@ describe GRPC::Core::Channel do
ch.close
deadline = Time.now + 5
blk = Proc.new do
blk = proc do
ch.create_call('dummy_method', 'dummy_host', deadline)
end
expect(&blk).to raise_error(RuntimeError)
end
end
describe '#destroy' do
@ -151,7 +145,7 @@ describe GRPC::Core::Channel do
port = find_unused_tcp_port
host = "localhost:#{port}"
ch = GRPC::Core::Channel.new(host, nil)
blk = Proc.new { ch.destroy }
blk = proc { ch.destroy }
expect(&blk).to_not raise_error
end
@ -159,18 +153,16 @@ describe GRPC::Core::Channel do
port = find_unused_tcp_port
host = "localhost:#{port}"
ch = GRPC::Core::Channel.new(host, nil)
blk = Proc.new { ch.destroy }
blk = proc { ch.destroy }
blk.call
expect(&blk).to_not raise_error
end
end
describe '::SSL_TARGET' do
it 'is a symbol' do
expect(GRPC::Core::Channel::SSL_TARGET).to be_a(Symbol)
end
end
describe '#close' do
@ -178,7 +170,7 @@ describe GRPC::Core::Channel do
port = find_unused_tcp_port
host = "localhost:#{port}"
ch = GRPC::Core::Channel.new(host, nil)
blk = Proc.new { ch.close }
blk = proc { ch.close }
expect(&blk).to_not raise_error
end
@ -186,10 +178,9 @@ describe GRPC::Core::Channel do
port = find_unused_tcp_port
host = "localhost:#{port}"
ch = GRPC::Core::Channel.new(host, nil)
blk = Proc.new { ch.close }
blk = proc { ch.close }
blk.call
expect(&blk).to_not raise_error
end
end
end

@ -41,7 +41,6 @@ def load_test_certs
end
shared_context 'setup: tags' do
before(:example) do
@server_finished_tag = Object.new
@client_finished_tag = Object.new
@ -71,7 +70,7 @@ shared_context 'setup: tags' do
expect(ev).not_to be_nil
expect(ev.type).to be(SERVER_RPC_NEW)
ev.call.server_accept(@server_queue, @server_finished_tag)
ev.call.server_end_initial_metadata()
ev.call.server_end_initial_metadata
ev.call.start_read(@server_tag)
ev = @server_queue.pluck(@server_tag, TimeConsts::INFINITE_FUTURE)
expect(ev.type).to be(READ)
@ -79,10 +78,10 @@ shared_context 'setup: tags' do
ev = @server_queue.pluck(@server_tag, TimeConsts::INFINITE_FUTURE)
expect(ev).not_to be_nil
expect(ev.type).to be(WRITE_ACCEPTED)
return ev.call
ev.call
end
def client_sends(call, sent='a message')
def client_sends(call, sent = 'a message')
req = ByteBuffer.new(sent)
call.start_invoke(@client_queue, @tag, @tag, @client_finished_tag)
ev = @client_queue.pluck(@tag, TimeConsts::INFINITE_FUTURE)
@ -92,17 +91,15 @@ shared_context 'setup: tags' do
ev = @client_queue.pluck(@tag, TimeConsts::INFINITE_FUTURE)
expect(ev).not_to be_nil
expect(ev.type).to be(WRITE_ACCEPTED)
return sent
sent
end
def new_client_call
@ch.create_call('/method', 'localhost', deadline)
end
end
shared_examples 'basic GRPC message delivery is OK' do
include_context 'setup: tags'
it 'servers receive requests from clients and start responding' do
@ -126,7 +123,7 @@ shared_examples 'basic GRPC message delivery is OK' do
# the server response
server_call.start_write(reply, @server_tag)
ev = expect_next_event_on(@server_queue, WRITE_ACCEPTED, @server_tag)
expect_next_event_on(@server_queue, WRITE_ACCEPTED, @server_tag)
end
it 'responses written by servers are received by the client' do
@ -135,15 +132,14 @@ shared_examples 'basic GRPC message delivery is OK' do
server_receives_and_responds_with('server_response')
call.start_read(@tag)
ev = expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
ev = expect_next_event_on(@client_queue, READ, @tag)
expect(ev.result.to_s).to eq('server_response')
end
it 'servers can ignore a client write and send a status' do
reply = ByteBuffer.new('the server payload')
call = new_client_call
msg = client_sends(call)
client_sends(call)
# check the server rpc new was received
@server.request_call(@server_tag)
@ -153,20 +149,20 @@ shared_examples 'basic GRPC message delivery is OK' do
# accept the call - need to do this to sent status.
server_call = ev.call
server_call.server_accept(@server_queue, @server_finished_tag)
server_call.server_end_initial_metadata()
server_call.server_end_initial_metadata
server_call.start_write_status(StatusCodes::NOT_FOUND, 'not found',
@server_tag)
# client gets an empty response for the read, preceeded by some metadata.
call.start_read(@tag)
ev = expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
ev = expect_next_event_on(@client_queue, READ, @tag)
expect(ev.tag).to be(@tag)
expect(ev.result.to_s).to eq('')
# finally, after client sends writes_done, they get the finished.
call.writes_done(@tag)
ev = expect_next_event_on(@client_queue, FINISH_ACCEPTED, @tag)
expect_next_event_on(@client_queue, FINISH_ACCEPTED, @tag)
ev = expect_next_event_on(@client_queue, FINISHED, @client_finished_tag)
expect(ev.result.code).to eq(StatusCodes::NOT_FOUND)
end
@ -175,12 +171,12 @@ shared_examples 'basic GRPC message delivery is OK' do
call = new_client_call
client_sends(call)
server_call = server_receives_and_responds_with('server_response')
server_call.start_write_status(10101, 'status code is 10101', @server_tag)
server_call.start_write_status(10_101, 'status code is 10101', @server_tag)
# first the client says writes are done
call.start_read(@tag)
ev = expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
ev = expect_next_event_on(@client_queue, READ, @tag)
expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
expect_next_event_on(@client_queue, READ, @tag)
call.writes_done(@tag)
# but nothing happens until the server sends a status
@ -192,24 +188,23 @@ shared_examples 'basic GRPC message delivery is OK' do
expect_next_event_on(@client_queue, FINISH_ACCEPTED, @tag)
ev = expect_next_event_on(@client_queue, FINISHED, @client_finished_tag)
expect(ev.result.details).to eq('status code is 10101')
expect(ev.result.code).to eq(10101)
expect(ev.result.code).to eq(10_101)
end
end
shared_examples 'GRPC metadata delivery works OK' do
include_context 'setup: tags'
describe 'from client => server' do
before(:example) do
n = 7 # arbitrary number of metadata
diff_keys = Hash[n.times.collect { |i| ['k%d' % i, 'v%d' % i] }]
null_vals = Hash[n.times.collect { |i| ['k%d' % i, 'v\0%d' % i] }]
same_keys = Hash[n.times.collect { |i| ['k%d' % i, ['v%d' % i] * n] }]
symbol_key = {:a_key => 'a val'}
diff_keys_fn = proc { |i| [sprintf('k%d', i), sprintf('v%d', i)] }
diff_keys = Hash[n.times.collect { |x| diff_keys_fn.call x }]
null_vals_fn = proc { |i| [sprintf('k%d', i), sprintf('v\0%d', i)] }
null_vals = Hash[n.times.collect { |x| null_vals_fn.call x }]
same_keys_fn = proc { |i| [sprintf('k%d', i), [sprintf('v%d', i)] * n] }
same_keys = Hash[n.times.collect { |x| same_keys_fn.call x }]
symbol_key = { a_key: 'a val' }
@valid_metadata = [diff_keys, same_keys, null_vals, symbol_key]
@bad_keys = []
@bad_keys << { Object.new => 'a value' }
@ -239,28 +234,29 @@ shared_examples 'GRPC metadata delivery works OK' do
# Client begins a call OK
call.start_invoke(@client_queue, @tag, @tag, @client_finished_tag)
ev = expect_next_event_on(@client_queue, INVOKE_ACCEPTED, @tag)
expect_next_event_on(@client_queue, INVOKE_ACCEPTED, @tag)
# ... server has all metadata available even though the client did not
# send a write
@server.request_call(@server_tag)
ev = expect_next_event_on(@server_queue, SERVER_RPC_NEW, @server_tag)
replace_symbols = Hash[md.each_pair.collect { |x,y| [x.to_s, y] }]
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
result = ev.result.metadata
expect(result.merge(replace_symbols)).to eq(result)
end
end
end
describe 'from server => client' do
before(:example) do
n = 7 # arbitrary number of metadata
diff_keys = Hash[n.times.collect { |i| ['k%d' % i, 'v%d' % i] }]
null_vals = Hash[n.times.collect { |i| ['k%d' % i, 'v\0%d' % i] }]
same_keys = Hash[n.times.collect { |i| ['k%d' % i, ['v%d' % i] * n] }]
symbol_key = {:a_key => 'a val'}
diff_keys_fn = proc { |i| [sprintf('k%d', i), sprintf('v%d', i)] }
diff_keys = Hash[n.times.collect { |x| diff_keys_fn.call x }]
null_vals_fn = proc { |i| [sprintf('k%d', i), sprintf('v\0%d', i)] }
null_vals = Hash[n.times.collect { |x| null_vals_fn.call x }]
same_keys_fn = proc { |i| [sprintf('k%d', i), [sprintf('v%d', i)] * n] }
same_keys = Hash[n.times.collect { |x| same_keys_fn.call x }]
symbol_key = { a_key: 'a val' }
@valid_metadata = [diff_keys, same_keys, null_vals, symbol_key]
@bad_keys = []
@bad_keys << { Object.new => 'a value' }
@ -290,7 +286,7 @@ shared_examples 'GRPC metadata delivery works OK' do
# ... server accepts the call without adding metadata
server_call.server_accept(@server_queue, @server_finished_tag)
server_call.server_end_initial_metadata()
server_call.server_end_initial_metadata
# ... these server sends some data, allowing the metadata read
server_call.start_write(ByteBuffer.new('reply with metadata'),
@ -300,7 +296,7 @@ shared_examples 'GRPC metadata delivery works OK' do
# there is the HTTP status metadata, though there should not be any
# TODO(temiola): update this with the bug number to be resolved
ev = expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
expect(ev.result).to eq({':status' => '200'})
expect(ev.result).to eq(':status' => '200')
end
it 'sends all the pairs and status:200 when keys and values are valid' do
@ -316,24 +312,19 @@ shared_examples 'GRPC metadata delivery works OK' do
# ... server adds metadata and accepts the call
server_call.add_metadata(md)
server_call.server_accept(@server_queue, @server_finished_tag)
server_call.server_end_initial_metadata()
server_call.server_end_initial_metadata
# Now the client can read the metadata
ev = expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
replace_symbols = Hash[md.each_pair.collect { |x,y| [x.to_s, y] }]
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
replace_symbols[':status'] = '200'
expect(ev.result).to eq(replace_symbols)
end
end
end
end
describe 'the http client/server' do
before(:example) do
port = find_unused_tcp_port
host = "localhost:#{port}"
@ -354,11 +345,9 @@ describe 'the http client/server' do
it_behaves_like 'GRPC metadata delivery works OK' do
end
end
describe 'the secure http client/server' do
before(:example) do
certs = load_test_certs
port = find_unused_tcp_port
@ -369,7 +358,7 @@ describe 'the secure http client/server' do
@server = GRPC::Core::Server.new(@server_queue, nil, server_creds)
@server.add_http2_port(host, true)
@server.start
args = {Channel::SSL_TARGET => 'foo.test.google.com'}
args = { Channel::SSL_TARGET => 'foo.test.google.com' }
@ch = Channel.new(host, args,
GRPC::Core::Credentials.new(certs[0], nil, nil))
end
@ -383,5 +372,4 @@ describe 'the secure http client/server' do
it_behaves_like 'GRPC metadata delivery works OK' do
end
end

@ -30,7 +30,6 @@
require 'grpc'
describe GRPC::Core::CompletionQueue do
describe '#new' do
it 'is constructed successufully' do
expect { GRPC::Core::CompletionQueue.new }.not_to raise_error
@ -53,7 +52,6 @@ describe GRPC::Core::CompletionQueue do
expect { ch.next(a_time) }.not_to raise_error
end
end
end
describe '#pluck' do
@ -74,8 +72,5 @@ describe GRPC::Core::CompletionQueue do
expect { ch.pluck(tag, a_time) }.not_to raise_error
end
end
end
end

@ -29,7 +29,6 @@
require 'grpc'
def load_test_certs
test_root = File.join(File.dirname(__FILE__), 'testdata')
files = ['ca.pem', 'server1.pem', 'server1.key']
@ -39,9 +38,7 @@ end
Credentials = GRPC::Core::Credentials
describe Credentials do
describe '#new' do
it 'can be constructed with fake inputs' do
expect { Credentials.new('root_certs', 'key', 'cert') }.not_to raise_error
end
@ -58,30 +55,23 @@ describe Credentials do
it 'cannot be constructed with a nil server roots' do
_, client_key, client_chain = load_test_certs
blk = Proc.new { Credentials.new(nil, client_key, client_chain) }
blk = proc { Credentials.new(nil, client_key, client_chain) }
expect(&blk).to raise_error
end
end
describe '#compose' do
it 'can be completed OK' do
certs = load_test_certs
cred1 = Credentials.new(*certs)
cred2 = Credentials.new(*certs)
expect { cred1.compose(cred2) }.to_not raise_error
end
end
describe 'Credentials#default' do
it 'is not implemented yet' do
expect { Credentials.default() }.to raise_error RuntimeError
expect { Credentials.default }.to raise_error RuntimeError
end
end
end

@ -30,25 +30,23 @@
require 'grpc'
describe GRPC::Core::CompletionType do
before(:each) do
@known_types = {
:QUEUE_SHUTDOWN => 0,
:READ => 1,
:INVOKE_ACCEPTED => 2,
:WRITE_ACCEPTED => 3,
:FINISH_ACCEPTED => 4,
:CLIENT_METADATA_READ => 5,
:FINISHED => 6,
:SERVER_RPC_NEW => 7,
:RESERVED => 8
QUEUE_SHUTDOWN: 0,
READ: 1,
INVOKE_ACCEPTED: 2,
WRITE_ACCEPTED: 3,
FINISH_ACCEPTED: 4,
CLIENT_METADATA_READ: 5,
FINISHED: 6,
SERVER_RPC_NEW: 7,
RESERVED: 8
}
end
it 'should have all the known types' do
mod = GRPC::Core::CompletionType
blk = Proc.new { Hash[mod.constants.collect { |c| [c, mod.const_get(c)] }] }
blk = proc { Hash[mod.constants.collect { |c| [c, mod.const_get(c)] }] }
expect(blk.call).to eq(@known_types)
end
end

@ -38,9 +38,9 @@ describe GRPC::ActiveCall do
CompletionType = GRPC::Core::CompletionType
before(:each) do
@pass_through = Proc.new { |x| x }
@pass_through = proc { |x| x }
@server_tag = Object.new
@server_done_tag, meta_tag = Object.new
@server_done_tag = Object.new
@tag = Object.new
@client_queue = GRPC::Core::CompletionQueue.new
@ -70,7 +70,7 @@ describe GRPC::ActiveCall do
describe '#multi_req_view' do
it 'exposes a fixed subset of the ActiveCall methods' do
want = ['cancelled', 'deadline', 'each_remote_read', 'shutdown']
want = %w(cancelled, deadline, each_remote_read, shutdown)
v = @client_call.multi_req_view
want.each do |w|
expect(v.methods.include?(w))
@ -80,7 +80,7 @@ describe GRPC::ActiveCall do
describe '#single_req_view' do
it 'exposes a fixed subset of the ActiveCall methods' do
want = ['cancelled', 'deadline', 'shutdown']
want = %w(cancelled, deadline, shutdown)
v = @client_call.single_req_view
want.each do |w|
expect(v.methods.include?(w))
@ -110,7 +110,7 @@ describe GRPC::ActiveCall do
# Accept the call, and verify that the server reads the response ok.
ev.call.server_accept(@client_queue, @server_tag)
ev.call.server_end_initial_metadata()
ev.call.server_end_initial_metadata
server_call = ActiveCall.new(ev.call, @client_queue, @pass_through,
@pass_through, deadline)
expect(server_call.remote_read).to eq(msg)
@ -120,7 +120,7 @@ describe GRPC::ActiveCall do
call = make_test_call
done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
deadline)
marshal = Proc.new { |x| 'marshalled:' + x }
marshal = proc { |x| 'marshalled:' + x }
client_call = ActiveCall.new(call, @client_queue, marshal,
@pass_through, deadline,
finished_tag: done_tag,
@ -132,33 +132,29 @@ describe GRPC::ActiveCall do
@server.request_call(@server_tag)
ev = @server_queue.next(deadline)
ev.call.server_accept(@client_queue, @server_tag)
ev.call.server_end_initial_metadata()
ev.call.server_end_initial_metadata
server_call = ActiveCall.new(ev.call, @client_queue, @pass_through,
@pass_through, deadline)
expect(server_call.remote_read).to eq('marshalled:' + msg)
end
end
describe '#client_start_invoke' do
it 'sends keywords as metadata to the server when the are present' do
call, pass_through = make_test_call, Proc.new { |x| x }
done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
deadline, k1: 'v1',
k2: 'v2')
call = make_test_call
ActiveCall.client_start_invoke(call, @client_queue, deadline,
k1: 'v1', k2: 'v2')
@server.request_call(@server_tag)
ev = @server_queue.next(deadline)
expect(ev).to_not be_nil
expect(ev.result.metadata['k1']).to eq('v1')
expect(ev.result.metadata['k2']).to eq('v2')
end
end
describe '#remote_read' do
it 'reads the response sent by a server' do
call, pass_through = make_test_call, Proc.new { |x| x }
call = make_test_call
done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@ -173,7 +169,7 @@ describe GRPC::ActiveCall do
end
it 'saves metadata { status=200 } when the server adds no metadata' do
call, pass_through = make_test_call, Proc.new { |x| x }
call = make_test_call
done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@ -186,11 +182,11 @@ describe GRPC::ActiveCall do
server_call.remote_send('ignore me')
expect(client_call.metadata).to be_nil
client_call.remote_read
expect(client_call.metadata).to eq({':status' => '200'})
expect(client_call.metadata).to eq(':status' => '200')
end
it 'saves metadata add by the server' do
call, pass_through = make_test_call, Proc.new { |x| x }
call = make_test_call
done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@ -203,13 +199,12 @@ describe GRPC::ActiveCall do
server_call.remote_send('ignore me')
expect(client_call.metadata).to be_nil
client_call.remote_read
expect(client_call.metadata).to eq({':status' => '200', 'k1' => 'v1',
'k2' => 'v2'})
expected = { ':status' => '200', 'k1' => 'v1', 'k2' => 'v2' }
expect(client_call.metadata).to eq(expected)
end
it 'get a nil msg before a status when an OK status is sent' do
call, pass_through = make_test_call, Proc.new { |x| x }
call = make_test_call
done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through,
@ -227,12 +222,11 @@ describe GRPC::ActiveCall do
expect(res).to be_nil
end
it 'unmarshals the response using the unmarshal func' do
call = make_test_call
done_tag, meta_tag = ActiveCall.client_start_invoke(call, @client_queue,
deadline)
unmarshal = Proc.new { |x| 'unmarshalled:' + x }
unmarshal = proc { |x| 'unmarshalled:' + x }
client_call = ActiveCall.new(call, @client_queue, @pass_through,
unmarshal, deadline,
finished_tag: done_tag,
@ -245,7 +239,6 @@ describe GRPC::ActiveCall do
server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('unmarshalled:server_response')
end
end
describe '#each_remote_read' do
@ -298,7 +291,6 @@ describe GRPC::ActiveCall do
server_call.send_status(OK, 'OK')
expect { e.next }.to raise_error(StopIteration)
end
end
describe '#writes_done' do
@ -357,7 +349,6 @@ describe GRPC::ActiveCall do
expect { client_call.writes_done(true) }.to_not raise_error
expect { server_call.finished }.to_not raise_error
end
end
def expect_server_to_receive(sent_text, **kw)
@ -371,7 +362,7 @@ describe GRPC::ActiveCall do
ev = @server_queue.next(deadline)
ev.call.add_metadata(kw)
ev.call.server_accept(@client_queue, @server_done_tag)
ev.call.server_end_initial_metadata()
ev.call.server_end_initial_metadata
ActiveCall.new(ev.call, @client_queue, @pass_through,
@pass_through, deadline,
finished_tag: @server_done_tag)
@ -384,5 +375,4 @@ describe GRPC::ActiveCall do
def deadline
Time.now + 0.25 # in 0.25 seconds; arbitrary
end
end

@ -31,7 +31,7 @@ require 'grpc'
require 'xray/thread_dump_signal_handler'
require_relative '../port_picker'
NOOP = Proc.new { |x| x }
NOOP = proc { |x| x }
def wakey_thread(&blk)
awake_mutex, awake_cond = Mutex.new, ConditionVariable.new
@ -52,7 +52,6 @@ include GRPC::Core::StatusCodes
include GRPC::Core::TimeConsts
describe 'ClientStub' do
before(:each) do
Thread.abort_on_exception = true
@server = nil
@ -67,11 +66,10 @@ describe 'ClientStub' do
end
describe '#new' do
it 'can be created from a host and args' do
host = new_test_host
opts = {:a_channel_arg => 'an_arg'}
blk = Proc.new do
opts = { a_channel_arg: 'an_arg' }
blk = proc do
GRPC::ClientStub.new(host, @cq, **opts)
end
expect(&blk).not_to raise_error
@ -79,8 +77,8 @@ describe 'ClientStub' do
it 'can be created with a default deadline' do
host = new_test_host
opts = {:a_channel_arg => 'an_arg', :deadline => 5}
blk = Proc.new do
opts = { a_channel_arg: 'an_arg', deadline: 5 }
blk = proc do
GRPC::ClientStub.new(host, @cq, **opts)
end
expect(&blk).not_to raise_error
@ -88,8 +86,8 @@ describe 'ClientStub' do
it 'can be created with an channel override' do
host = new_test_host
opts = {:a_channel_arg => 'an_arg', :channel_override => @ch}
blk = Proc.new do
opts = { a_channel_arg: 'an_arg', channel_override: @ch }
blk = proc do
GRPC::ClientStub.new(host, @cq, **opts)
end
expect(&blk).not_to raise_error
@ -97,8 +95,8 @@ describe 'ClientStub' do
it 'cannot be created with a bad channel override' do
host = new_test_host
blk = Proc.new do
opts = {:a_channel_arg => 'an_arg', :channel_override => Object.new}
blk = proc do
opts = { a_channel_arg: 'an_arg', channel_override: Object.new }
GRPC::ClientStub.new(host, @cq, **opts)
end
expect(&blk).to raise_error
@ -106,8 +104,8 @@ describe 'ClientStub' do
it 'cannot be created with bad credentials' do
host = new_test_host
blk = Proc.new do
opts = {:a_channel_arg => 'an_arg', :creds => Object.new}
blk = proc do
opts = { a_channel_arg: 'an_arg', creds: Object.new }
GRPC::ClientStub.new(host, @cq, **opts)
end
expect(&blk).to raise_error
@ -116,17 +114,16 @@ describe 'ClientStub' do
it 'can be created with test test credentials' do
certs = load_test_certs
host = new_test_host
blk = Proc.new do
blk = proc do
opts = {
GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.com',
:a_channel_arg => 'an_arg',
:creds => GRPC::Core::Credentials.new(certs[0], nil, nil)
a_channel_arg: 'an_arg',
creds: GRPC::Core::Credentials.new(certs[0], nil, nil)
}
GRPC::ClientStub.new(host, @cq, **opts)
end
expect(&blk).to_not raise_error
end
end
describe '#request_response' do
@ -135,7 +132,6 @@ describe 'ClientStub' do
end
shared_examples 'request response' do
it 'should send a request to/receive a reply from a server' do
host = new_test_host
th = run_request_response(host, @sent_msg, @resp, @pass)
@ -146,8 +142,8 @@ describe 'ClientStub' do
it 'should send metadata to the server ok' do
host = new_test_host
th = run_request_response(host, @sent_msg, @resp, @pass, k1: 'v1',
k2: 'v2')
th = run_request_response(host, @sent_msg, @resp, @pass,
k1: 'v1', k2: 'v2')
stub = GRPC::ClientStub.new(host, @cq)
expect(get_response(stub)).to eq(@resp)
th.join
@ -157,7 +153,10 @@ describe 'ClientStub' do
host = new_test_host
th = run_request_response(host, @sent_msg, @resp, @pass,
k1: 'updated-v1', k2: 'v2')
update_md = Proc.new { |md| md[:k1] = 'updated-v1'; md }
update_md = proc do |md|
md[:k1] = 'updated-v1'
md
end
stub = GRPC::ClientStub.new(host, @cq, update_metadata: update_md)
expect(get_response(stub)).to eq(@resp)
th.join
@ -167,7 +166,7 @@ describe 'ClientStub' do
alt_host = new_test_host
th = run_request_response(alt_host, @sent_msg, @resp, @pass)
ch = GRPC::Core::Channel.new(alt_host, nil)
stub = GRPC::ClientStub.new('ignored-host', @cq, channel_override:ch)
stub = GRPC::ClientStub.new('ignored-host', @cq, channel_override: ch)
expect(get_response(stub)).to eq(@resp)
th.join
end
@ -176,45 +175,37 @@ describe 'ClientStub' do
host = new_test_host
th = run_request_response(host, @sent_msg, @resp, @fail)
stub = GRPC::ClientStub.new(host, @cq)
blk = Proc.new { get_response(stub) }
blk = proc { get_response(stub) }
expect(&blk).to raise_error(GRPC::BadStatus)
th.join
end
end
describe 'without a call operation' do
def get_response(stub)
stub.request_response(@method, @sent_msg, NOOP, NOOP, k1: 'v1',
k2: 'v2')
stub.request_response(@method, @sent_msg, NOOP, NOOP,
k1: 'v1', k2: 'v2')
end
it_behaves_like 'request response'
end
describe 'via a call operation' do
def get_response(stub)
op = stub.request_response(@method, @sent_msg, NOOP, NOOP,
return_op:true, k1: 'v1', k2: 'v2')
return_op: true, k1: 'v1', k2: 'v2')
expect(op).to be_a(GRPC::ActiveCall::Operation)
op.execute()
op.execute
end
it_behaves_like 'request response'
end
end
describe '#client_streamer' do
shared_examples 'client streaming' do
before(:each) do
@sent_msgs = Array.new(3) { |i| 'msg_' + (i+1).to_s }
@sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s }
@resp = 'a_reply'
end
@ -228,19 +219,21 @@ describe 'ClientStub' do
it 'should send metadata to the server ok' do
host = new_test_host
th = run_client_streamer(host, @sent_msgs, @resp, @pass, k1: 'v1',
k2: 'v2')
th = run_client_streamer(host, @sent_msgs, @resp, @pass,
k1: 'v1', k2: 'v2')
stub = GRPC::ClientStub.new(host, @cq)
expect(get_response(stub)).to eq(@resp)
th.join
end
it 'should update the sent metadata with a provided metadata updater' do
host = new_test_host
th = run_client_streamer(host, @sent_msgs, @resp, @pass,
k1: 'updated-v1', k2: 'v2')
update_md = Proc.new { |md| md[:k1] = 'updated-v1'; md }
update_md = proc do |md|
md[:k1] = 'updated-v1'
md
end
stub = GRPC::ClientStub.new(host, @cq, update_metadata: update_md)
expect(get_response(stub)).to eq(@resp)
th.join
@ -250,46 +243,38 @@ describe 'ClientStub' do
host = new_test_host
th = run_client_streamer(host, @sent_msgs, @resp, @fail)
stub = GRPC::ClientStub.new(host, @cq)
blk = Proc.new { get_response(stub) }
blk = proc { get_response(stub) }
expect(&blk).to raise_error(GRPC::BadStatus)
th.join
end
end
describe 'without a call operation' do
def get_response(stub)
stub.client_streamer(@method, @sent_msgs, NOOP, NOOP, k1: 'v1',
k2: 'v2')
stub.client_streamer(@method, @sent_msgs, NOOP, NOOP,
k1: 'v1', k2: 'v2')
end
it_behaves_like 'client streaming'
end
describe 'via a call operation' do
def get_response(stub)
op = stub.client_streamer(@method, @sent_msgs, NOOP, NOOP,
return_op:true, k1: 'v1', k2: 'v2')
return_op: true, k1: 'v1', k2: 'v2')
expect(op).to be_a(GRPC::ActiveCall::Operation)
resp = op.execute()
op.execute
end
it_behaves_like 'client streaming'
end
end
describe '#server_streamer' do
shared_examples 'server streaming' do
before(:each) do
@sent_msg = 'a_msg'
@replys = Array.new(3) { |i| 'reply_' + (i+1).to_s }
@replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s }
end
it 'should send a request to/receive replies from a server' do
@ -311,8 +296,8 @@ describe 'ClientStub' do
it 'should send metadata to the server ok' do
host = new_test_host
th = run_server_streamer(host, @sent_msg, @replys, @fail, k1: 'v1',
k2: 'v2')
th = run_server_streamer(host, @sent_msg, @replys, @fail,
k1: 'v1', k2: 'v2')
stub = GRPC::ClientStub.new(host, @cq)
e = get_responses(stub)
expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus)
@ -323,55 +308,50 @@ describe 'ClientStub' do
host = new_test_host
th = run_server_streamer(host, @sent_msg, @replys, @pass,
k1: 'updated-v1', k2: 'v2')
update_md = Proc.new { |md| md[:k1] = 'updated-v1'; md }
update_md = proc do |md|
md[:k1] = 'updated-v1'
md
end
stub = GRPC::ClientStub.new(host, @cq, update_metadata: update_md)
e = get_responses(stub)
expect(e.collect { |r| r }).to eq(@replys)
th.join
end
end
describe 'without a call operation' do
def get_responses(stub)
e = stub.server_streamer(@method, @sent_msg, NOOP, NOOP, k1: 'v1',
k2: 'v2')
e = stub.server_streamer(@method, @sent_msg, NOOP, NOOP,
k1: 'v1', k2: 'v2')
expect(e).to be_a(Enumerator)
e
end
it_behaves_like 'server streaming'
end
describe 'via a call operation' do
def get_responses(stub)
op = stub.server_streamer(@method, @sent_msg, NOOP, NOOP,
return_op:true, k1: 'v1', k2: 'v2')
return_op: true, k1: 'v1', k2: 'v2')
expect(op).to be_a(GRPC::ActiveCall::Operation)
e = op.execute()
e = op.execute
expect(e).to be_a(Enumerator)
e
end
it_behaves_like 'server streaming'
end
end
describe '#bidi_streamer' do
shared_examples 'bidi streaming' do
before(:each) do
@sent_msgs = Array.new(3) { |i| 'msg_' + (i+1).to_s }
@replys = Array.new(3) { |i| 'reply_' + (i+1).to_s }
@sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s }
@replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s }
end
it 'supports sending all the requests first', :bidi => true do
it 'supports sending all the requests first', bidi: true do
host = new_test_host
th = run_bidi_streamer_handle_inputs_first(host, @sent_msgs, @replys,
@pass)
@ -381,7 +361,7 @@ describe 'ClientStub' do
th.join
end
it 'supports client-initiated ping pong', :bidi => true do
it 'supports client-initiated ping pong', bidi: true do
host = new_test_host
th = run_bidi_streamer_echo_ping_pong(host, @sent_msgs, @pass, true)
stub = GRPC::ClientStub.new(host, @cq)
@ -396,7 +376,7 @@ describe 'ClientStub' do
# servers don't know if all the client metadata has been sent until
# they receive a message from the client. Without receiving all the
# metadata, the server does not accept the call, so this test hangs.
xit 'supports a server-initiated ping pong', :bidi => true do
xit 'supports a server-initiated ping pong', bidi: true do
host = new_test_host
th = run_bidi_streamer_echo_ping_pong(host, @sent_msgs, @pass, false)
stub = GRPC::ClientStub.new(host, @cq)
@ -404,11 +384,9 @@ describe 'ClientStub' do
expect(e.collect { |r| r }).to eq(@sent_msgs)
th.join
end
end
describe 'without a call operation' do
def get_responses(stub)
e = stub.bidi_streamer(@method, @sent_msgs, NOOP, NOOP)
expect(e).to be_a(Enumerator)
@ -416,13 +394,12 @@ describe 'ClientStub' do
end
it_behaves_like 'bidi streaming'
end
describe 'via a call operation' do
def get_responses(stub)
op = stub.bidi_streamer(@method, @sent_msgs, NOOP, NOOP, return_op:true)
op = stub.bidi_streamer(@method, @sent_msgs, NOOP, NOOP,
return_op: true)
expect(op).to be_a(GRPC::ActiveCall::Operation)
e = op.execute
expect(e).to be_a(Enumerator)
@ -430,9 +407,7 @@ describe 'ClientStub' do
end
it_behaves_like 'bidi streaming'
end
end
def run_server_streamer(hostname, expected_input, replys, status, **kw)
@ -514,14 +489,13 @@ describe 'ClientStub' do
def expect_server_to_be_invoked(hostname, awake_mutex, awake_cond)
server_queue = start_test_server(hostname, awake_mutex, awake_cond)
test_deadline = Time.now + 10 # fail tests after 10 seconds
ev = server_queue.pluck(@server_tag, INFINITE_FUTURE)
raise OutOfTime if ev.nil?
fail OutOfTime if ev.nil?
server_call = ev.call
server_call.metadata = ev.result.metadata
finished_tag = Object.new
server_call.server_accept(server_queue, finished_tag)
server_call.server_end_initial_metadata()
server_call.server_end_initial_metadata
GRPC::ActiveCall.new(server_call, server_queue, NOOP, NOOP, INFINITE_FUTURE,
finished_tag: finished_tag)
end
@ -530,5 +504,4 @@ describe 'ClientStub' do
port = find_unused_tcp_port
"localhost:#{port}"
end
end

@ -30,9 +30,7 @@
require 'grpc'
require 'grpc/generic/rpc_desc'
describe GRPC::RpcDesc do
RpcDesc = GRPC::RpcDesc
Stream = RpcDesc::Stream
OK = GRPC::Core::StatusCodes::OK
@ -56,7 +54,6 @@ describe GRPC::RpcDesc do
end
describe '#run_server_method' do
describe 'for request responses' do
before(:each) do
@call = double('active_call')
@ -78,7 +75,7 @@ describe GRPC::RpcDesc do
it 'absorbs EventError with no further action' do
expect(@call).to receive(:remote_read).once.and_raise(EventError)
blk = Proc.new do
blk = proc do
@request_response.run_server_method(@call, method(:fake_reqresp))
end
expect(&blk).to_not raise_error
@ -86,7 +83,7 @@ describe GRPC::RpcDesc do
it 'absorbs CallError with no further action' do
expect(@call).to receive(:remote_read).once.and_raise(CallError)
blk = Proc.new do
blk = proc do
@request_response.run_server_method(@call, method(:fake_reqresp))
end
expect(&blk).to_not raise_error
@ -100,7 +97,6 @@ describe GRPC::RpcDesc do
expect(@call).to receive(:finished).once
@request_response.run_server_method(@call, method(:fake_reqresp))
end
end
describe 'for client streamers' do
@ -122,7 +118,7 @@ describe GRPC::RpcDesc do
it 'absorbs EventError with no further action' do
expect(@call).to receive(:remote_send).once.and_raise(EventError)
blk = Proc.new do
blk = proc do
@client_streamer.run_server_method(@call, method(:fake_clstream))
end
expect(&blk).to_not raise_error
@ -130,20 +126,18 @@ describe GRPC::RpcDesc do
it 'absorbs CallError with no further action' do
expect(@call).to receive(:remote_send).once.and_raise(CallError)
blk = Proc.new do
blk = proc do
@client_streamer.run_server_method(@call, method(:fake_clstream))
end
expect(&blk).to_not raise_error
end
it 'sends a response and closes the stream if there no errors' do
req = Object.new
expect(@call).to receive(:remote_send).once.with(@ok_response)
expect(@call).to receive(:send_status).once.with(OK, 'OK')
expect(@call).to receive(:finished).once
@client_streamer.run_server_method(@call, method(:fake_clstream))
end
end
describe 'for server streaming' do
@ -167,7 +161,7 @@ describe GRPC::RpcDesc do
it 'absorbs EventError with no further action' do
expect(@call).to receive(:remote_read).once.and_raise(EventError)
blk = Proc.new do
blk = proc do
@server_streamer.run_server_method(@call, method(:fake_svstream))
end
expect(&blk).to_not raise_error
@ -175,7 +169,7 @@ describe GRPC::RpcDesc do
it 'absorbs CallError with no further action' do
expect(@call).to receive(:remote_read).once.and_raise(CallError)
blk = Proc.new do
blk = proc do
@server_streamer.run_server_method(@call, method(:fake_svstream))
end
expect(&blk).to_not raise_error
@ -189,7 +183,6 @@ describe GRPC::RpcDesc do
expect(@call).to receive(:finished).once
@server_streamer.run_server_method(@call, method(:fake_svstream))
end
end
describe 'for bidi streamers' do
@ -215,30 +208,27 @@ describe GRPC::RpcDesc do
end
it 'closes the stream if there no errors' do
req = Object.new
expect(@call).to receive(:run_server_bidi)
expect(@call).to receive(:send_status).once.with(OK, 'OK')
expect(@call).to receive(:finished).once
@bidi_streamer.run_server_method(@call, method(:fake_bidistream))
end
end
end
describe '#assert_arity_matches' do
def no_arg
end
def fake_clstream(arg)
def fake_clstream(_arg)
end
def fake_svstream(arg1, arg2)
def fake_svstream(_arg1, _arg2)
end
it 'raises when a request_response does not have 2 args' do
[:fake_clstream, :no_arg].each do |mth|
blk = Proc.new do
blk = proc do
@request_response.assert_arity_matches(method(mth))
end
expect(&blk).to raise_error
@ -246,7 +236,7 @@ describe GRPC::RpcDesc do
end
it 'passes when a request_response has 2 args' do
blk = Proc.new do
blk = proc do
@request_response.assert_arity_matches(method(:fake_svstream))
end
expect(&blk).to_not raise_error
@ -254,7 +244,7 @@ describe GRPC::RpcDesc do
it 'raises when a server_streamer does not have 2 args' do
[:fake_clstream, :no_arg].each do |mth|
blk = Proc.new do
blk = proc do
@server_streamer.assert_arity_matches(method(mth))
end
expect(&blk).to raise_error
@ -262,7 +252,7 @@ describe GRPC::RpcDesc do
end
it 'passes when a server_streamer has 2 args' do
blk = Proc.new do
blk = proc do
@server_streamer.assert_arity_matches(method(:fake_svstream))
end
expect(&blk).to_not raise_error
@ -270,7 +260,7 @@ describe GRPC::RpcDesc do
it 'raises when a client streamer does not have 1 arg' do
[:fake_svstream, :no_arg].each do |mth|
blk = Proc.new do
blk = proc do
@client_streamer.assert_arity_matches(method(mth))
end
expect(&blk).to raise_error
@ -278,16 +268,15 @@ describe GRPC::RpcDesc do
end
it 'passes when a client_streamer has 1 arg' do
blk = Proc.new do
blk = proc do
@client_streamer.assert_arity_matches(method(:fake_clstream))
end
expect(&blk).to_not raise_error
end
it 'raises when a bidi streamer does not have 1 arg' do
[:fake_svstream, :no_arg].each do |mth|
blk = Proc.new do
blk = proc do
@bidi_streamer.assert_arity_matches(method(mth))
end
expect(&blk).to raise_error
@ -295,88 +284,78 @@ describe GRPC::RpcDesc do
end
it 'passes when a bidi streamer has 1 arg' do
blk = Proc.new do
blk = proc do
@bidi_streamer.assert_arity_matches(method(:fake_clstream))
end
expect(&blk).to_not raise_error
end
end
describe '#is_request_response?' do
describe '#request_response?' do
it 'is true only input and output are both not Streams' do
expect(@request_response.is_request_response?).to be(true)
expect(@client_streamer.is_request_response?).to be(false)
expect(@bidi_streamer.is_request_response?).to be(false)
expect(@server_streamer.is_request_response?).to be(false)
expect(@request_response.request_response?).to be(true)
expect(@client_streamer.request_response?).to be(false)
expect(@bidi_streamer.request_response?).to be(false)
expect(@server_streamer.request_response?).to be(false)
end
end
describe '#is_client_streamer?' do
describe '#client_streamer?' do
it 'is true only when input is a Stream and output is not a Stream' do
expect(@client_streamer.is_client_streamer?).to be(true)
expect(@request_response.is_client_streamer?).to be(false)
expect(@server_streamer.is_client_streamer?).to be(false)
expect(@bidi_streamer.is_client_streamer?).to be(false)
expect(@client_streamer.client_streamer?).to be(true)
expect(@request_response.client_streamer?).to be(false)
expect(@server_streamer.client_streamer?).to be(false)
expect(@bidi_streamer.client_streamer?).to be(false)
end
end
describe '#is_server_streamer?' do
describe '#server_streamer?' do
it 'is true only when output is a Stream and input is not a Stream' do
expect(@server_streamer.is_server_streamer?).to be(true)
expect(@client_streamer.is_server_streamer?).to be(false)
expect(@request_response.is_server_streamer?).to be(false)
expect(@bidi_streamer.is_server_streamer?).to be(false)
expect(@server_streamer.server_streamer?).to be(true)
expect(@client_streamer.server_streamer?).to be(false)
expect(@request_response.server_streamer?).to be(false)
expect(@bidi_streamer.server_streamer?).to be(false)
end
end
describe '#is_bidi_streamer?' do
describe '#bidi_streamer?' do
it 'is true only when output is a Stream and input is a Stream' do
expect(@bidi_streamer.is_bidi_streamer?).to be(true)
expect(@server_streamer.is_bidi_streamer?).to be(false)
expect(@client_streamer.is_bidi_streamer?).to be(false)
expect(@request_response.is_bidi_streamer?).to be(false)
expect(@bidi_streamer.bidi_streamer?).to be(true)
expect(@server_streamer.bidi_streamer?).to be(false)
expect(@client_streamer.bidi_streamer?).to be(false)
expect(@request_response.bidi_streamer?).to be(false)
end
end
def fake_reqresp(req, call)
def fake_reqresp(_req, _call)
@ok_response
end
def fake_clstream(call)
def fake_clstream(_call)
@ok_response
end
def fake_svstream(req, call)
def fake_svstream(_req, _call)
[@ok_response, @ok_response]
end
def fake_bidistream(an_array)
return an_array
an_array
end
def bad_status(req, call)
raise GRPC::BadStatus.new(@bs_code, 'NOK')
def bad_status(_req, _call)
fail GRPC::BadStatus.new(@bs_code, 'NOK')
end
def other_error(req, call)
raise ArgumentError.new('other error')
def other_error(_req, _call)
fail(ArgumentError, 'other error')
end
def bad_status_alt(call)
raise GRPC::BadStatus.new(@bs_code, 'NOK')
def bad_status_alt(_call)
fail GRPC::BadStatus.new(@bs_code, 'NOK')
end
def other_error_alt(call)
raise ArgumentError.new('other error')
def other_error_alt(_call)
fail(ArgumentError, 'other error')
end
end

@ -33,9 +33,7 @@ require 'xray/thread_dump_signal_handler'
Pool = GRPC::RpcServer::Pool
describe Pool do
describe '#new' do
it 'raises if a non-positive size is used' do
expect { Pool.new(0) }.to raise_error
expect { Pool.new(-1) }.to raise_error
@ -45,11 +43,9 @@ describe Pool do
it 'is constructed OK with a positive size' do
expect { Pool.new(1) }.not_to raise_error
end
end
describe '#jobs_waiting' do
it 'at start, it is zero' do
p = Pool.new(1)
expect(p.jobs_waiting).to be(0)
@ -57,74 +53,67 @@ describe Pool do
it 'it increases, with each scheduled job if the pool is not running' do
p = Pool.new(1)
job = Proc.new { }
job = proc {}
expect(p.jobs_waiting).to be(0)
5.times do |i|
p.schedule(&job)
expect(p.jobs_waiting).to be(i + 1)
end
end
it 'it decreases as jobs are run' do
p = Pool.new(1)
job = Proc.new { }
job = proc {}
expect(p.jobs_waiting).to be(0)
3.times do |i|
3.times do
p.schedule(&job)
end
p.start
sleep 2
expect(p.jobs_waiting).to be(0)
end
end
describe '#schedule' do
it 'throws if the pool is already stopped' do
p = Pool.new(1)
p.stop()
job = Proc.new { }
p.stop
job = proc {}
expect { p.schedule(&job) }.to raise_error
end
it 'adds jobs that get run by the pool' do
p = Pool.new(1)
p.start()
p.start
o, q = Object.new, Queue.new
job = Proc.new { q.push(o) }
job = proc { q.push(o) }
p.schedule(&job)
expect(q.pop).to be(o)
p.stop
end
end
describe '#stop' do
it 'works when there are no scheduled tasks' do
p = Pool.new(1)
expect { p.stop() }.not_to raise_error
expect { p.stop }.not_to raise_error
end
it 'stops jobs when there are long running jobs' do
p = Pool.new(1)
p.start()
p.start
o, q = Object.new, Queue.new
job = Proc.new do
job = proc do
sleep(5) # long running
q.push(o)
end
p.schedule(&job)
sleep(1) # should ensure the long job gets scheduled
expect { p.stop() }.not_to raise_error
expect { p.stop }.not_to raise_error
end
end
describe '#start' do
it 'runs pre-scheduled jobs' do
p = Pool.new(2)
o, q = Object.new, Queue.new
@ -146,7 +135,5 @@ describe Pool do
end
p.stop
end
end
end

@ -37,33 +37,37 @@ def load_test_certs
files.map { |f| File.open(File.join(test_root, f)).read }
end
# A test message
class EchoMsg
def self.marshal(o)
def self.marshal(_o)
''
end
def self.unmarshal(o)
def self.unmarshal(_o)
EchoMsg.new
end
end
# A test service with no methods.
class EmptyService
include GRPC::GenericService
end
# A test service without an implementation.
class NoRpcImplementation
include GRPC::GenericService
rpc :an_rpc, EchoMsg, EchoMsg
end
# A test service with an implementation.
class EchoService
include GRPC::GenericService
rpc :an_rpc, EchoMsg, EchoMsg
def initialize(default_var='ignored')
def initialize(_default_var = 'ignored')
end
def an_rpc(req, call)
def an_rpc(req, _call)
logger.info('echo service received a request')
req
end
@ -71,14 +75,15 @@ end
EchoStub = EchoService.rpc_stub_class
# A slow test service.
class SlowService
include GRPC::GenericService
rpc :an_rpc, EchoMsg, EchoMsg
def initialize(default_var='ignored')
def initialize(_default_var = 'ignored')
end
def an_rpc(req, call)
def an_rpc(req, _call)
delay = 0.25
logger.info("starting a slow #{delay} rpc")
sleep delay
@ -89,7 +94,6 @@ end
SlowStub = SlowService.rpc_stub_class
describe GRPC::RpcServer do
RpcServer = GRPC::RpcServer
StatusCodes = GRPC::Core::StatusCodes
@ -97,7 +101,7 @@ describe GRPC::RpcServer do
@method = 'an_rpc_method'
@pass = 0
@fail = 1
@noop = Proc.new { |x| x }
@noop = proc { |x| x }
@server_queue = GRPC::Core::CompletionQueue.new
port = find_unused_tcp_port
@ -112,18 +116,17 @@ describe GRPC::RpcServer do
end
describe '#new' do
it 'can be created with just some args' do
opts = {:a_channel_arg => 'an_arg'}
blk = Proc.new do
opts = { a_channel_arg: 'an_arg' }
blk = proc do
RpcServer.new(**opts)
end
expect(&blk).not_to raise_error
end
it 'can be created with a default deadline' do
opts = {:a_channel_arg => 'an_arg', :deadline => 5}
blk = Proc.new do
opts = { a_channel_arg: 'an_arg', deadline: 5 }
blk = proc do
RpcServer.new(**opts)
end
expect(&blk).not_to raise_error
@ -131,20 +134,20 @@ describe GRPC::RpcServer do
it 'can be created with a completion queue override' do
opts = {
:a_channel_arg => 'an_arg',
:completion_queue_override => @server_queue
a_channel_arg: 'an_arg',
completion_queue_override: @server_queue
}
blk = Proc.new do
blk = proc do
RpcServer.new(**opts)
end
expect(&blk).not_to raise_error
end
it 'cannot be created with a bad completion queue override' do
blk = Proc.new do
blk = proc do
opts = {
:a_channel_arg => 'an_arg',
:completion_queue_override => Object.new
a_channel_arg: 'an_arg',
completion_queue_override: Object.new
}
RpcServer.new(**opts)
end
@ -152,10 +155,10 @@ describe GRPC::RpcServer do
end
it 'cannot be created with invalid ServerCredentials' do
blk = Proc.new do
blk = proc do
opts = {
:a_channel_arg => 'an_arg',
:creds => Object.new
a_channel_arg: 'an_arg',
creds: Object.new
}
RpcServer.new(**opts)
end
@ -165,10 +168,10 @@ describe GRPC::RpcServer do
it 'can be created with the creds as valid ServerCedentials' do
certs = load_test_certs
server_creds = GRPC::Core::ServerCredentials.new(nil, certs[1], certs[2])
blk = Proc.new do
blk = proc do
opts = {
:a_channel_arg => 'an_arg',
:creds => server_creds
a_channel_arg: 'an_arg',
creds: server_creds
}
RpcServer.new(**opts)
end
@ -176,30 +179,28 @@ describe GRPC::RpcServer do
end
it 'can be created with a server override' do
opts = {:a_channel_arg => 'an_arg', :server_override => @server}
blk = Proc.new do
opts = { a_channel_arg: 'an_arg', server_override: @server }
blk = proc do
RpcServer.new(**opts)
end
expect(&blk).not_to raise_error
end
it 'cannot be created with a bad server override' do
blk = Proc.new do
blk = proc do
opts = {
:a_channel_arg => 'an_arg',
:server_override => Object.new
a_channel_arg: 'an_arg',
server_override: Object.new
}
RpcServer.new(**opts)
end
expect(&blk).to raise_error
end
end
describe '#stopped?' do
before(:each) do
opts = {:a_channel_arg => 'an_arg', :poll_period => 1}
opts = { a_channel_arg: 'an_arg', poll_period: 1 }
@srv = RpcServer.new(**opts)
end
@ -229,33 +230,31 @@ describe GRPC::RpcServer do
expect(@srv.stopped?).to be(true)
t.join
end
end
describe '#running?' do
it 'starts out false' do
opts = {:a_channel_arg => 'an_arg', :server_override => @server}
opts = { a_channel_arg: 'an_arg', server_override: @server }
r = RpcServer.new(**opts)
expect(r.running?).to be(false)
end
it 'is false after run is called with no services registered' do
opts = {
:a_channel_arg => 'an_arg',
:poll_period => 1,
:server_override => @server
a_channel_arg: 'an_arg',
poll_period: 1,
server_override: @server
}
r = RpcServer.new(**opts)
r.run()
r.run
expect(r.running?).to be(false)
end
it 'is true after run is called with a registered service' do
opts = {
:a_channel_arg => 'an_arg',
:poll_period => 1,
:server_override => @server
a_channel_arg: 'an_arg',
poll_period: 1,
server_override: @server
}
r = RpcServer.new(**opts)
r.handle(EchoService)
@ -265,13 +264,11 @@ describe GRPC::RpcServer do
r.stop
t.join
end
end
describe '#handle' do
before(:each) do
@opts = {:a_channel_arg => 'an_arg', :poll_period => 1}
@opts = { a_channel_arg: 'an_arg', poll_period: 1 }
@srv = RpcServer.new(**@opts)
end
@ -309,33 +306,30 @@ describe GRPC::RpcServer do
@srv.handle(EchoService)
expect { r.handle(EchoService) }.to raise_error
end
end
describe '#run' do
before(:each) do
@client_opts = {
:channel_override => @ch
channel_override: @ch
}
@marshal = EchoService.rpc_descs[:an_rpc].marshal_proc
@unmarshal = EchoService.rpc_descs[:an_rpc].unmarshal_proc(:output)
server_opts = {
:server_override => @server,
:completion_queue_override => @server_queue,
:poll_period => 1
server_override: @server,
completion_queue_override: @server_queue,
poll_period: 1
}
@srv = RpcServer.new(**server_opts)
end
describe 'when running' do
it 'should return NOT_FOUND status for requests on unknown methods' do
@srv.handle(EchoService)
t = Thread.new { @srv.run }
@srv.wait_till_running
req = EchoMsg.new
blk = Proc.new do
blk = proc do
cq = GRPC::Core::CompletionQueue.new
stub = GRPC::ClientStub.new(@host, cq, **@client_opts)
stub.request_response('/unknown', req, @marshal, @unmarshal)
@ -352,20 +346,19 @@ describe GRPC::RpcServer do
req = EchoMsg.new
n = 5 # arbitrary
stub = EchoStub.new(@host, **@client_opts)
n.times { |x| expect(stub.an_rpc(req)).to be_a(EchoMsg) }
n.times { expect(stub.an_rpc(req)).to be_a(EchoMsg) }
@srv.stop
t.join
end
it 'should obtain responses for multiple parallel requests' do
@srv.handle(EchoService)
t = Thread.new { @srv.run }
Thread.new { @srv.run }
@srv.wait_till_running
req, q = EchoMsg.new, Queue.new
n = 5 # arbitrary
threads = []
n.times do |x|
cq = GRPC::Core::CompletionQueue.new
n.times do
threads << Thread.new do
stub = EchoStub.new(@host, **@client_opts)
q << stub.an_rpc(req)
@ -373,44 +366,40 @@ describe GRPC::RpcServer do
end
n.times { expect(q.pop).to be_a(EchoMsg) }
@srv.stop
threads.each { |t| t.join }
threads.each(&:join)
end
it 'should return UNAVAILABLE status if there too many jobs' do
opts = {
:a_channel_arg => 'an_arg',
:server_override => @server,
:completion_queue_override => @server_queue,
:pool_size => 1,
:poll_period => 1,
:max_waiting_requests => 0
a_channel_arg: 'an_arg',
server_override: @server,
completion_queue_override: @server_queue,
pool_size: 1,
poll_period: 1,
max_waiting_requests: 0
}
alt_srv = RpcServer.new(**opts)
alt_srv.handle(SlowService)
t = Thread.new { alt_srv.run }
Thread.new { alt_srv.run }
alt_srv.wait_till_running
req = EchoMsg.new
n = 5 # arbitrary, use as many to ensure the server pool is exceeded
threads = []
_1_failed_as_unavailable = false
n.times do |x|
one_failed_as_unavailable = false
n.times do
threads << Thread.new do
cq = GRPC::Core::CompletionQueue.new
stub = SlowStub.new(@host, **@client_opts)
begin
stub.an_rpc(req)
rescue GRPC::BadStatus => e
_1_failed_as_unavailable = e.code == StatusCodes::UNAVAILABLE
one_failed_as_unavailable = e.code == StatusCodes::UNAVAILABLE
end
end
end
threads.each { |t| t.join }
threads.each(&:join)
alt_srv.stop
expect(_1_failed_as_unavailable).to be(true)
expect(one_failed_as_unavailable).to be(true)
end
end
end
end

@ -31,23 +31,24 @@ require 'grpc'
require 'grpc/generic/rpc_desc'
require 'grpc/generic/service'
# A test message that encodes/decodes using marshal/marshal.
class GoodMsg
def self.marshal(o)
def self.marshal(_o)
''
end
def self.unmarshal(o)
def self.unmarshal(_o)
GoodMsg.new
end
end
# A test message that encodes/decodes using encode/decode.
class EncodeDecodeMsg
def self.encode(o)
def self.encode(_o)
''
end
def self.decode(o)
def self.decode(_o)
GoodMsg.new
end
end
@ -55,7 +56,6 @@ end
GenericService = GRPC::GenericService
Dsl = GenericService::Dsl
describe 'String#underscore' do
it 'should convert CamelCase to underscore separated' do
expect('AnRPC'.underscore).to eq('an_rpc')
@ -66,20 +66,14 @@ describe 'String#underscore' do
end
describe Dsl do
it 'can be included in new classes' do
blk = Proc.new do
c = Class.new { include Dsl }
end
blk = proc { Class.new { include Dsl } }
expect(&blk).to_not raise_error
end
end
describe GenericService do
describe 'including it' do
it 'adds a class method, rpc' do
c = Class.new do
include GenericService
@ -144,9 +138,8 @@ describe GenericService do
end
describe '#include' do
it 'raises if #rpc is missing an arg' do
blk = Proc.new do
blk = proc do
Class.new do
include GenericService
rpc :AnRpc, GoodMsg
@ -154,7 +147,7 @@ describe GenericService do
end
expect(&blk).to raise_error ArgumentError
blk = Proc.new do
blk = proc do
Class.new do
include GenericService
rpc :AnRpc
@ -164,9 +157,8 @@ describe GenericService do
end
describe 'when #rpc args are incorrect' do
it 'raises if an arg does not have the marshal or unmarshal methods' do
blk = Proc.new do
blk = proc do
Class.new do
include GenericService
rpc :AnRpc, GoodMsg, Object
@ -176,13 +168,14 @@ describe GenericService do
end
it 'raises if a type arg only has the marshal method' do
# a bad message type with only a marshal method
class OnlyMarshal
def marshal(o)
o
end
end
blk = Proc.new do
blk = proc do
Class.new do
include GenericService
rpc :AnRpc, OnlyMarshal, GoodMsg
@ -192,12 +185,13 @@ describe GenericService do
end
it 'raises if a type arg only has the unmarshal method' do
# a bad message type with only an unmarshal method
class OnlyUnmarshal
def self.ummarshal(o)
o
end
end
blk = Proc.new do
blk = proc do
Class.new do
include GenericService
rpc :AnRpc, GoodMsg, OnlyUnmarshal
@ -208,7 +202,7 @@ describe GenericService do
end
it 'is ok for services that expect the default {un,}marshal methods' do
blk = Proc.new do
blk = proc do
Class.new do
include GenericService
rpc :AnRpc, GoodMsg, GoodMsg
@ -218,7 +212,7 @@ describe GenericService do
end
it 'is ok for services that override the default {un,}marshal methods' do
blk = Proc.new do
blk = proc do
Class.new do
include GenericService
self.marshal_class_method = :encode
@ -228,11 +222,9 @@ describe GenericService do
end
expect(&blk).not_to raise_error
end
end
describe '#rpc_stub_class' do
it 'generates a client class that defines any of the rpc methods' do
s = Class.new do
include GenericService
@ -249,7 +241,6 @@ describe GenericService do
end
describe 'the generated instances' do
it 'can be instanciated with just a hostname' do
s = Class.new do
include GenericService
@ -277,13 +268,10 @@ describe GenericService do
expect(o.methods).to include(:a_client_streamer)
expect(o.methods).to include(:a_bidi_streamer)
end
end
end
describe '#assert_rpc_descs_have_methods' do
it 'fails if there is no instance method for an rpc descriptor' do
c1 = Class.new do
include GenericService
@ -310,16 +298,16 @@ describe GenericService do
rpc :AClientStreamer, stream(GoodMsg), GoodMsg
rpc :ABidiStreamer, stream(GoodMsg), stream(GoodMsg)
def an_rpc(req, call)
def an_rpc(_req, _call)
end
def a_server_streamer(req, call)
def a_server_streamer(_req, _call)
end
def a_client_streamer(call)
def a_client_streamer(_call)
end
def a_bidi_streamer(call)
def a_bidi_streamer(_call)
end
end
expect { c.assert_rpc_descs_have_methods }.to_not raise_error
@ -330,7 +318,7 @@ describe GenericService do
include GenericService
rpc :AnRpc, GoodMsg, GoodMsg
def an_rpc(req, call)
def an_rpc(_req, _call)
end
end
c = Class.new(base)
@ -344,13 +332,11 @@ describe GenericService do
rpc :AnRpc, GoodMsg, GoodMsg
end
c = Class.new(base) do
def an_rpc(req, call)
def an_rpc(_req, _call)
end
end
expect { c.assert_rpc_descs_have_methods }.to_not raise_error
expect(c.include?(GenericService)).to be(true)
end
end
end

@ -30,7 +30,6 @@
require 'grpc'
describe GRPC::Core::Metadata do
describe '#new' do
it 'should create instances' do
expect { GRPC::Core::Metadata.new('a key', 'a value') }.to_not raise_error
@ -62,5 +61,4 @@ describe GRPC::Core::Metadata do
expect(md.dup.value).to eq('a value')
end
end
end

@ -32,7 +32,7 @@ require 'socket'
# @param [Fixnum] the minimum port number to accept
# @param [Fixnum] the maximum port number to accept
# @return [Fixnum ]a free tcp port
def find_unused_tcp_port(min=32768, max=60000)
def find_unused_tcp_port(min = 32_768, max = 60_000)
# Allow the system to assign a port, by specifying 0.
# Loop until a port is assigned in the required range
loop do
@ -40,6 +40,6 @@ def find_unused_tcp_port(min=32768, max=60000)
socket.bind(Addrinfo.tcp('127.0.0.1', 0))
p = socket.local_address.ip_port
socket.close
return p if p > min and p < 60000
return p if p > min && p < max
end
end

@ -35,13 +35,10 @@ def load_test_certs
files.map { |f| File.open(File.join(test_root, f)).read }
end
describe GRPC::Core::ServerCredentials do
Creds = GRPC::Core::ServerCredentials
describe '#new' do
it 'can be constructed from a fake CA PEM, server PEM and a server key' do
expect { Creds.new('a', 'b', 'c') }.not_to raise_error
end
@ -53,22 +50,20 @@ describe GRPC::Core::ServerCredentials do
it 'cannot be constructed without a server cert chain' do
root_cert, server_key, _ = load_test_certs
blk = Proc.new { Creds.new(root_cert, server_key, nil) }
blk = proc { Creds.new(root_cert, server_key, nil) }
expect(&blk).to raise_error
end
it 'cannot be constructed without a server key' do
root_cert, server_key, _ = load_test_certs
blk = Proc.new { Creds.new(root_cert, _, cert_chain) }
root_cert, _, _ = load_test_certs
blk = proc { Creds.new(root_cert, nil, cert_chain) }
expect(&blk).to raise_error
end
it 'can be constructed without a root_cret' do
_, server_key, cert_chain = load_test_certs
blk = Proc.new { Creds.new(_, server_key, cert_chain) }
blk = proc { Creds.new(nil, server_key, cert_chain) }
expect(&blk).to_not raise_error
end
end
end

@ -39,7 +39,6 @@ end
Server = GRPC::Core::Server
describe Server do
def create_test_cert
GRPC::Core::ServerCredentials.new(*load_test_certs)
end
@ -49,11 +48,8 @@ describe Server do
end
describe '#start' do
it 'runs without failing' do
blk = Proc.new do
s = Server.new(@cq, nil).start
end
blk = proc { Server.new(@cq, nil).start }
expect(&blk).to_not raise_error
end
@ -62,20 +58,19 @@ describe Server do
s.close
expect { s.start }.to raise_error(RuntimeError)
end
end
describe '#destroy' do
it 'destroys a server ok' do
s = start_a_server
blk = Proc.new { s.destroy }
blk = proc { s.destroy }
expect(&blk).to_not raise_error
end
it 'can be called more than once without error' do
s = start_a_server
begin
blk = Proc.new { s.destroy }
blk = proc { s.destroy }
expect(&blk).to_not raise_error
blk.call
expect(&blk).to_not raise_error
@ -89,7 +84,7 @@ describe Server do
it 'closes a server ok' do
s = start_a_server
begin
blk = Proc.new { s.close }
blk = proc { s.close }
expect(&blk).to_not raise_error
ensure
s.close
@ -98,7 +93,7 @@ describe Server do
it 'can be called more than once without error' do
s = start_a_server
blk = Proc.new { s.close }
blk = proc { s.close }
expect(&blk).to_not raise_error
blk.call
expect(&blk).to_not raise_error
@ -106,11 +101,9 @@ describe Server do
end
describe '#add_http_port' do
describe 'for insecure servers' do
it 'runs without failing' do
blk = Proc.new do
blk = proc do
s = Server.new(@cq, nil)
s.add_http2_port('localhost:0')
s.close
@ -123,13 +116,11 @@ describe Server do
s.close
expect { s.add_http2_port('localhost:0') }.to raise_error(RuntimeError)
end
end
describe 'for secure servers' do
it 'runs without failing' do
blk = Proc.new do
blk = proc do
s = Server.new(@cq, nil)
s.add_http2_port('localhost:0', true)
s.close
@ -140,16 +131,13 @@ describe Server do
it 'fails if the server is closed' do
s = Server.new(@cq, nil)
s.close
blk = Proc.new { s.add_http2_port('localhost:0', true) }
blk = proc { s.add_http2_port('localhost:0', true) }
expect(&blk).to raise_error(RuntimeError)
end
end
end
shared_examples '#new' do
it 'takes a completion queue with nil channel args' do
expect { Server.new(@cq, nil, create_test_cert) }.to_not raise_error
end
@ -162,14 +150,14 @@ describe Server do
end
it 'does not take a hash with bad values as channel args' do
blk = construct_with_args(:symbol => Object.new)
blk = construct_with_args(symbol: Object.new)
expect(&blk).to raise_error TypeError
blk = construct_with_args('1' => Hash.new)
expect(&blk).to raise_error TypeError
end
it 'can take a hash with a symbol key as channel args' do
blk = construct_with_args(:a_symbol => 1)
blk = construct_with_args(a_symbol: 1)
expect(&blk).to_not raise_error
end
@ -179,46 +167,41 @@ describe Server do
end
it 'can take a hash with a string value as channel args' do
blk = construct_with_args(:a_symbol => '1')
blk = construct_with_args(a_symbol: '1')
expect(&blk).to_not raise_error
end
it 'can take a hash with a symbol value as channel args' do
blk = construct_with_args(:a_symbol => :another_symbol)
blk = construct_with_args(a_symbol: :another_symbol)
expect(&blk).to_not raise_error
end
it 'can take a hash with a numeric value as channel args' do
blk = construct_with_args(:a_symbol => 1)
blk = construct_with_args(a_symbol: 1)
expect(&blk).to_not raise_error
end
it 'can take a hash with many args as channel args' do
args = Hash[127.times.collect { |x| [x.to_s, x] } ]
args = Hash[127.times.collect { |x| [x.to_s, x] }]
blk = construct_with_args(args)
expect(&blk).to_not raise_error
end
end
describe '#new with an insecure channel' do
def construct_with_args(a)
Proc.new { Server.new(@cq, a) }
proc { Server.new(@cq, a) }
end
it_behaves_like '#new'
end
describe '#new with a secure channel' do
def construct_with_args(a)
Proc.new { Server.new(@cq, a, create_test_cert) }
proc { Server.new(@cq, a, create_test_cert) }
end
it_behaves_like '#new'
end
def start_a_server
@ -229,5 +212,4 @@ describe Server do
s.start
s
end
end

@ -32,7 +32,6 @@ require 'grpc'
TimeConsts = GRPC::Core::TimeConsts
describe TimeConsts do
before(:each) do
@known_consts = [:ZERO, :INFINITE_FUTURE, :INFINITE_PAST].sort
end
@ -49,11 +48,9 @@ describe TimeConsts do
end
end
end
end
describe '#from_relative_time' do
it 'cannot handle arbitrary objects' do
expect { TimeConsts.from_relative_time(Object.new) }.to raise_error
end
@ -89,5 +86,4 @@ describe '#from_relative_time' do
expect(abs.to_f).to be_within(epsilon).of(want.to_f)
end
end
end

Loading…
Cancel
Save