Merge pull request #1188 from tbetbetbe/grpc_ruby_switch_to_new_call_api

Grpc ruby switch to new call api
pull/1264/head
Jan Tattermusch 10 years ago
commit d74896f086
  1. src/ruby/ext/grpc/rb_byte_buffer.c (204)
  2. src/ruby/ext/grpc/rb_byte_buffer.h (16)
  3. src/ruby/ext/grpc/rb_call.c (699)
  4. src/ruby/ext/grpc/rb_call.h (9)
  5. src/ruby/ext/grpc/rb_channel.c (31)
  6. src/ruby/ext/grpc/rb_completion_queue.c (18)
  7. src/ruby/ext/grpc/rb_completion_queue.h (8)
  8. src/ruby/ext/grpc/rb_event.c (361)
  9. src/ruby/ext/grpc/rb_event.h (53)
  10. src/ruby/ext/grpc/rb_grpc.c (13)
  11. src/ruby/ext/grpc/rb_grpc.h (9)
  12. src/ruby/ext/grpc/rb_metadata.c (215)
  13. src/ruby/ext/grpc/rb_metadata.h (53)
  14. src/ruby/ext/grpc/rb_server.c (85)
  15. src/ruby/lib/grpc.rb (1)
  16. src/ruby/lib/grpc/core/event.rb (44)
  17. src/ruby/lib/grpc/errors.rb (4)
  18. src/ruby/lib/grpc/generic/active_call.rb (177)
  19. src/ruby/lib/grpc/generic/bidi_call.rb (57)
  20. src/ruby/lib/grpc/generic/client_stub.rb (52)
  21. src/ruby/lib/grpc/generic/rpc_desc.rb (9)
  22. src/ruby/lib/grpc/generic/rpc_server.rb (79)
  23. src/ruby/spec/alloc_spec.rb (44)
  24. src/ruby/spec/byte_buffer_spec.rb (67)
  25. src/ruby/spec/call_spec.rb (61)
  26. src/ruby/spec/channel_spec.rb (29)
  27. src/ruby/spec/client_server_spec.rb (356)
  28. src/ruby/spec/event_spec.rb (53)
  29. src/ruby/spec/generic/active_call_spec.rb (144)
  30. src/ruby/spec/generic/client_stub_spec.rb (136)
  31. src/ruby/spec/generic/rpc_desc_spec.rb (57)
  32. src/ruby/spec/generic/rpc_server_spec.rb (2)
  33. src/ruby/spec/metadata_spec.rb (64)

src/ruby/ext/grpc/rb_byte_buffer.c
@@ -39,203 +39,29 @@
#include <grpc/support/slice.h> #include <grpc/support/slice.h>
#include "rb_grpc.h" #include "rb_grpc.h"
/* grpc_rb_byte_buffer wraps a grpc_byte_buffer. It provides a peer ruby grpc_byte_buffer* grpc_rb_s_to_byte_buffer(char *string, size_t length) {
* object, 'mark' to minimize copying when a byte_buffer is created from gpr_slice slice = gpr_slice_from_copied_buffer(string, length);
* ruby. */ grpc_byte_buffer *buffer = grpc_byte_buffer_create(&slice, 1);
typedef struct grpc_rb_byte_buffer { gpr_slice_unref(slice);
/* Holder of ruby objects involved in constructing the status */ return buffer;
VALUE mark;
/* The actual status */
grpc_byte_buffer *wrapped;
} grpc_rb_byte_buffer;
/* Destroys ByteBuffer instances. */
static void grpc_rb_byte_buffer_free(void *p) {
grpc_rb_byte_buffer *bb = NULL;
if (p == NULL) {
return;
};
bb = (grpc_rb_byte_buffer *)p;
/* Deletes the wrapped object if the mark object is Qnil, which indicates
* that no other object is the actual owner. */
if (bb->wrapped != NULL && bb->mark == Qnil) {
grpc_byte_buffer_destroy(bb->wrapped);
}
xfree(p);
}
/* Protects the mark object from GC */
static void grpc_rb_byte_buffer_mark(void *p) {
grpc_rb_byte_buffer *bb = NULL;
if (p == NULL) {
return;
}
bb = (grpc_rb_byte_buffer *)p;
/* If it's not already cleaned up, mark the mark object */
if (bb->mark != Qnil && BUILTIN_TYPE(bb->mark) != T_NONE) {
rb_gc_mark(bb->mark);
}
} }
/* id_source is the name of the hidden ivar the preserves the original VALUE grpc_rb_byte_buffer_to_s(grpc_byte_buffer *buffer) {
* byte_buffer source string */
static ID id_source;
/* Allocates ByteBuffer instances.
Provides safe default values for the byte_buffer fields. */
static VALUE grpc_rb_byte_buffer_alloc(VALUE cls) {
grpc_rb_byte_buffer *wrapper = ALLOC(grpc_rb_byte_buffer);
wrapper->wrapped = NULL;
wrapper->mark = Qnil;
return Data_Wrap_Struct(cls, grpc_rb_byte_buffer_mark,
grpc_rb_byte_buffer_free, wrapper);
}
/* Clones ByteBuffer instances.
Gives ByteBuffer a consistent implementation of Ruby's object copy/dup
protocol. */
static VALUE grpc_rb_byte_buffer_init_copy(VALUE copy, VALUE orig) {
grpc_rb_byte_buffer *orig_bb = NULL;
grpc_rb_byte_buffer *copy_bb = NULL;
if (copy == orig) {
return copy;
}
/* Raise an error if orig is not a metadata object or a subclass. */
if (TYPE(orig) != T_DATA ||
RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_byte_buffer_free) {
rb_raise(rb_eTypeError, "not a %s", rb_obj_classname(rb_cByteBuffer));
}
Data_Get_Struct(orig, grpc_rb_byte_buffer, orig_bb);
Data_Get_Struct(copy, grpc_rb_byte_buffer, copy_bb);
/* use ruby's MEMCPY to make a byte-for-byte copy of the metadata wrapper
* object. */
MEMCPY(copy_bb, orig_bb, grpc_rb_byte_buffer, 1);
return copy;
}
/* id_empty is used to return the empty string from to_s when necessary. */
static ID id_empty;
static VALUE grpc_rb_byte_buffer_to_s(VALUE self) {
grpc_rb_byte_buffer *wrapper = NULL;
grpc_byte_buffer *bb = NULL;
grpc_byte_buffer_reader *reader = NULL;
char *output = NULL;
size_t length = 0; size_t length = 0;
char *string = NULL;
size_t offset = 0; size_t offset = 0;
VALUE output_obj = Qnil; grpc_byte_buffer_reader *reader = NULL;
gpr_slice next; gpr_slice next;
if (buffer == NULL) {
return Qnil;
Data_Get_Struct(self, grpc_rb_byte_buffer, wrapper);
output_obj = rb_ivar_get(wrapper->mark, id_source);
if (output_obj != Qnil) {
/* From ruby, ByteBuffers are immutable so if a source is set, return that
* as the to_s value */
return output_obj;
}
/* Read the bytes. */
bb = wrapper->wrapped;
if (bb == NULL) {
return rb_id2str(id_empty);
}
length = grpc_byte_buffer_length(bb);
if (length == 0) {
return rb_id2str(id_empty);
} }
reader = grpc_byte_buffer_reader_create(bb); length = grpc_byte_buffer_length(buffer);
output = xmalloc(length); string = xmalloc(length + 1);
reader = grpc_byte_buffer_reader_create(buffer);
while (grpc_byte_buffer_reader_next(reader, &next) != 0) { while (grpc_byte_buffer_reader_next(reader, &next) != 0) {
memcpy(output + offset, GPR_SLICE_START_PTR(next), GPR_SLICE_LENGTH(next)); memcpy(string + offset, GPR_SLICE_START_PTR(next), GPR_SLICE_LENGTH(next));
offset += GPR_SLICE_LENGTH(next); offset += GPR_SLICE_LENGTH(next);
} }
output_obj = rb_str_new(output, length); return rb_str_new(string, length);
/* Save a references to the computed string in the mark object so that the
* calling to_s does not do any allocations. */
wrapper->mark = rb_class_new_instance(0, NULL, rb_cObject);
rb_ivar_set(wrapper->mark, id_source, output_obj);
return output_obj;
}
/* Initializes ByteBuffer instances. */
static VALUE grpc_rb_byte_buffer_init(VALUE self, VALUE src) {
gpr_slice a_slice;
grpc_rb_byte_buffer *wrapper = NULL;
grpc_byte_buffer *byte_buffer = NULL;
if (TYPE(src) != T_STRING) {
rb_raise(rb_eTypeError, "bad byte_buffer arg: got <%s>, want <String>",
rb_obj_classname(src));
return Qnil;
}
Data_Get_Struct(self, grpc_rb_byte_buffer, wrapper);
a_slice = gpr_slice_malloc(RSTRING_LEN(src));
memcpy(GPR_SLICE_START_PTR(a_slice), RSTRING_PTR(src), RSTRING_LEN(src));
byte_buffer = grpc_byte_buffer_create(&a_slice, 1);
gpr_slice_unref(a_slice);
if (byte_buffer == NULL) {
rb_raise(rb_eArgError, "could not create a byte_buffer, not sure why");
return Qnil;
}
wrapper->wrapped = byte_buffer;
/* Save a references to the original string in the mark object so that the
* pointers used there is valid for the lifetime of the object. */
wrapper->mark = rb_class_new_instance(0, NULL, rb_cObject);
rb_ivar_set(wrapper->mark, id_source, src);
return self;
}
/* rb_cByteBuffer is the ruby class that proxies grpc_byte_buffer. */
VALUE rb_cByteBuffer = Qnil;
void Init_grpc_byte_buffer() {
rb_cByteBuffer =
rb_define_class_under(rb_mGrpcCore, "ByteBuffer", rb_cObject);
/* Allocates an object managed by the ruby runtime */
rb_define_alloc_func(rb_cByteBuffer, grpc_rb_byte_buffer_alloc);
/* Provides a ruby constructor and support for dup/clone. */
rb_define_method(rb_cByteBuffer, "initialize", grpc_rb_byte_buffer_init, 1);
rb_define_method(rb_cByteBuffer, "initialize_copy",
grpc_rb_byte_buffer_init_copy, 1);
/* Provides a to_s method that returns the buffer value */
rb_define_method(rb_cByteBuffer, "to_s", grpc_rb_byte_buffer_to_s, 0);
id_source = rb_intern("__source");
id_empty = rb_intern("");
}
VALUE grpc_rb_byte_buffer_create_with_mark(VALUE mark, grpc_byte_buffer *bb) {
grpc_rb_byte_buffer *byte_buffer = NULL;
if (bb == NULL) {
return Qnil;
}
byte_buffer = ALLOC(grpc_rb_byte_buffer);
byte_buffer->wrapped = bb;
byte_buffer->mark = mark;
return Data_Wrap_Struct(rb_cByteBuffer, grpc_rb_byte_buffer_mark,
grpc_rb_byte_buffer_free, byte_buffer);
}
/* Gets the wrapped byte_buffer from the ruby wrapper */
grpc_byte_buffer *grpc_rb_get_wrapped_byte_buffer(VALUE v) {
grpc_rb_byte_buffer *wrapper = NULL;
Data_Get_Struct(v, grpc_rb_byte_buffer, wrapper);
return wrapper->wrapped;
} }

src/ruby/ext/grpc/rb_byte_buffer.h
@@ -37,18 +37,10 @@
#include <grpc/grpc.h> #include <grpc/grpc.h>
#include <ruby.h> #include <ruby.h>
/* rb_cByteBuffer is the ByteBuffer class whose instances proxy /* Converts a char* with a length to a grpc_byte_buffer */
grpc_byte_buffer. */ grpc_byte_buffer *grpc_rb_s_to_byte_buffer(char *string, size_t length);
extern VALUE rb_cByteBuffer;
/* Initializes the ByteBuffer class. */ /* Converts a grpc_byte_buffer to a ruby string */
void Init_grpc_byte_buffer(); VALUE grpc_rb_byte_buffer_to_s(grpc_byte_buffer *buffer);
/* grpc_rb_byte_buffer_create_with_mark creates a grpc_rb_byte_buffer with a
* ruby mark object that will be kept alive while the byte_buffer is alive. */
VALUE grpc_rb_byte_buffer_create_with_mark(VALUE mark, grpc_byte_buffer* bb);
/* Gets the wrapped byte_buffer from its ruby object. */
grpc_byte_buffer* grpc_rb_get_wrapped_byte_buffer(VALUE v);
#endif /* GRPC_RB_BYTE_BUFFER_H_ */ #endif /* GRPC_RB_BYTE_BUFFER_H_ */
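These two helpers replace the ByteBuffer wrapper class: message payloads now cross the extension boundary as plain Ruby Strings. A minimal illustration (the variable name is invented, not part of this diff):

    payload = 'serialized request bytes'
    # the String itself is handed to the send-message op (grpc_rb_s_to_byte_buffer
    # copies it into a grpc_byte_buffer), and received messages come back as
    # Strings built by grpc_rb_byte_buffer_to_s rather than ByteBuffer objects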

src/ruby/ext/grpc/rb_call.c
@@ -36,11 +36,19 @@
#include <ruby.h> #include <ruby.h>
#include <grpc/grpc.h> #include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include "rb_byte_buffer.h" #include "rb_byte_buffer.h"
#include "rb_completion_queue.h" #include "rb_completion_queue.h"
#include "rb_metadata.h"
#include "rb_grpc.h" #include "rb_grpc.h"
/* rb_sBatchResult is the struct class used to hold the results of a batch call */
static VALUE rb_sBatchResult;
/* rb_cMdAry is the MetadataArray class whose instances proxy
* grpc_metadata_array. */
static VALUE rb_cMdAry;
/* id_cq is the name of the hidden ivar that preserves a reference to a /* id_cq is the name of the hidden ivar that preserves a reference to a
* completion queue */ * completion queue */
static ID id_cq; static ID id_cq;
@@ -62,6 +70,15 @@ static ID id_metadata;
* received by the call and subsequently saved on it. */ * received by the call and subsequently saved on it. */
static ID id_status; static ID id_status;
/* sym_* are the symbol for attributes of rb_sBatchResult. */
static VALUE sym_send_message;
static VALUE sym_send_metadata;
static VALUE sym_send_close;
static VALUE sym_send_status;
static VALUE sym_message;
static VALUE sym_status;
static VALUE sym_cancelled;
/* hash_all_calls is a hash of Call address -> reference count that is used to /* hash_all_calls is a hash of Call address -> reference count that is used to
* track the creation and destruction of rb_call instances. * track the creation and destruction of rb_call instances.
*/ */
@@ -101,84 +118,6 @@ const char *grpc_call_error_detail_of(grpc_call_error err) {
return detail; return detail;
} }
/* grpc_rb_call_add_metadata_hash_cb is the hash iteration callback used by
grpc_rb_call_add_metadata.
*/
int grpc_rb_call_add_metadata_hash_cb(VALUE key, VALUE val, VALUE call_obj) {
grpc_call *call = NULL;
grpc_metadata *md = NULL;
VALUE md_obj = Qnil;
VALUE md_obj_args[2];
VALUE flags = rb_ivar_get(call_obj, id_flags);
grpc_call_error err;
int array_length;
int i;
/* Construct a metadata object from key and value and add it */
Data_Get_Struct(call_obj, grpc_call, call);
md_obj_args[0] = key;
if (TYPE(val) == T_ARRAY) {
/* If the value is an array, add each value in the array separately */
array_length = RARRAY_LEN(val);
for (i = 0; i < array_length; i++) {
md_obj_args[1] = rb_ary_entry(val, i);
md_obj = rb_class_new_instance(2, md_obj_args, rb_cMetadata);
md = grpc_rb_get_wrapped_metadata(md_obj);
err = grpc_call_add_metadata_old(call, md, NUM2UINT(flags));
if (err != GRPC_CALL_OK) {
rb_raise(rb_eCallError, "add metadata failed: %s (code=%d)",
grpc_call_error_detail_of(err), err);
return ST_STOP;
}
}
} else {
md_obj_args[1] = val;
md_obj = rb_class_new_instance(2, md_obj_args, rb_cMetadata);
md = grpc_rb_get_wrapped_metadata(md_obj);
err = grpc_call_add_metadata_old(call, md, NUM2UINT(flags));
if (err != GRPC_CALL_OK) {
rb_raise(rb_eCallError, "add metadata failed: %s (code=%d)",
grpc_call_error_detail_of(err), err);
return ST_STOP;
}
}
return ST_CONTINUE;
}
/*
call-seq:
call.add_metadata(completion_queue, hash_elements, flags=nil)
Add metadata elements to the call from a ruby hash, to be sent upon
invocation. flags is a bit-field combination of the write flags defined
above. REQUIRES: grpc_call_invoke/grpc_call_accept have not been
called on this call. Produces no events. */
static VALUE grpc_rb_call_add_metadata(int argc, VALUE *argv, VALUE self) {
VALUE metadata;
VALUE flags = Qnil;
ID id_size = rb_intern("size");
/* "11" == 1 mandatory args, 1 (flags) is optional */
rb_scan_args(argc, argv, "11", &metadata, &flags);
if (NIL_P(flags)) {
flags = UINT2NUM(0); /* Default to no flags */
}
if (TYPE(metadata) != T_HASH) {
rb_raise(rb_eTypeError, "add metadata failed: metadata should be a hash");
return Qnil;
}
if (NUM2UINT(rb_funcall(metadata, id_size, 0)) == 0) {
return Qnil;
}
rb_ivar_set(self, id_flags, flags);
rb_ivar_set(self, id_input_md, metadata);
rb_hash_foreach(metadata, grpc_rb_call_add_metadata_hash_cb, self);
return Qnil;
}
/* Called by clients to cancel an RPC on the server. /* Called by clients to cancel an RPC on the server.
Can be called multiple times, from any thread. */ Can be called multiple times, from any thread. */
static VALUE grpc_rb_call_cancel(VALUE self) { static VALUE grpc_rb_call_cancel(VALUE self) {
@@ -194,63 +133,6 @@ static VALUE grpc_rb_call_cancel(VALUE self) {
return Qnil; return Qnil;
} }
/*
call-seq:
call.invoke(completion_queue, tag, flags=nil)
Invoke the RPC. Starts sending metadata and request headers on the wire.
flags is a bit-field combination of the write flags defined above.
REQUIRES: Can be called at most once per call.
Can only be called on the client.
Produces a GRPC_INVOKE_ACCEPTED event on completion. */
static VALUE grpc_rb_call_invoke(int argc, VALUE *argv, VALUE self) {
VALUE cqueue = Qnil;
VALUE metadata_read_tag = Qnil;
VALUE finished_tag = Qnil;
VALUE flags = Qnil;
grpc_call *call = NULL;
grpc_completion_queue *cq = NULL;
grpc_call_error err;
/* "31" == 3 mandatory args, 1 (flags) is optional */
rb_scan_args(argc, argv, "31", &cqueue, &metadata_read_tag, &finished_tag,
&flags);
if (NIL_P(flags)) {
flags = UINT2NUM(0); /* Default to no flags */
}
cq = grpc_rb_get_wrapped_completion_queue(cqueue);
Data_Get_Struct(self, grpc_call, call);
err = grpc_call_invoke_old(call, cq, ROBJECT(metadata_read_tag),
ROBJECT(finished_tag), NUM2UINT(flags));
if (err != GRPC_CALL_OK) {
rb_raise(rb_eCallError, "invoke failed: %s (code=%d)",
grpc_call_error_detail_of(err), err);
}
/* Add the completion queue as an instance attribute, prevents it from being
* GCed until this call object is GCed */
rb_ivar_set(self, id_cq, cqueue);
return Qnil;
}
/* Initiate a read on a call. Output event contains a byte buffer with the
result of the read.
REQUIRES: No other reads are pending on the call. It is only safe to start
the next read after the corresponding read event is received. */
static VALUE grpc_rb_call_start_read(VALUE self, VALUE tag) {
grpc_call *call = NULL;
grpc_call_error err;
Data_Get_Struct(self, grpc_call, call);
err = grpc_call_start_read_old(call, ROBJECT(tag));
if (err != GRPC_CALL_OK) {
rb_raise(rb_eCallError, "start read failed: %s (code=%d)",
grpc_call_error_detail_of(err), err);
}
return Qnil;
}
/* /*
call-seq: call-seq:
status = call.status status = call.status
@@ -299,147 +181,402 @@ static VALUE grpc_rb_call_set_metadata(VALUE self, VALUE metadata) {
return rb_ivar_set(self, id_metadata, metadata); return rb_ivar_set(self, id_metadata, metadata);
} }
/* /* grpc_rb_md_ary_fill_hash_cb is the hash iteration callback used
call-seq: to fill grpc_metadata_array.
call.start_write(byte_buffer, tag, flags=nil)
Queue a byte buffer for writing.
flags is a bit-field combination of the write flags defined above.
A write with byte_buffer null is allowed, and will not send any bytes on the
wire. If this is performed without GRPC_WRITE_BUFFER_HINT flag it provides
a mechanism to flush any previously buffered writes to outgoing flow control.
REQUIRES: No other writes are pending on the call. It is only safe to
start the next write after the corresponding write_accepted event
is received.
GRPC_INVOKE_ACCEPTED must have been received by the application
prior to calling this on the client. On the server,
grpc_call_accept must have been called successfully.
Produces a GRPC_WRITE_ACCEPTED event. */
static VALUE grpc_rb_call_start_write(int argc, VALUE *argv, VALUE self) {
VALUE byte_buffer = Qnil;
VALUE tag = Qnil;
VALUE flags = Qnil;
grpc_call *call = NULL;
grpc_byte_buffer *bfr = NULL;
grpc_call_error err;
/* "21" == 2 mandatory args, 1 (flags) is optional */ its capacity should have been computed via a prior call to
rb_scan_args(argc, argv, "21", &byte_buffer, &tag, &flags); grpc_rb_md_ary_fill_hash_cb
if (NIL_P(flags)) { */
flags = UINT2NUM(0); /* Default to no flags */ int grpc_rb_md_ary_fill_hash_cb(VALUE key, VALUE val, VALUE md_ary_obj) {
grpc_metadata_array *md_ary = NULL;
int array_length;
int i;
/* Construct a metadata object from key and value and add it */
Data_Get_Struct(md_ary_obj, grpc_metadata_array, md_ary);
if (TYPE(val) == T_ARRAY) {
/* If the value is an array, add capacity for each value in the array */
array_length = RARRAY_LEN(val);
for (i = 0; i < array_length; i++) {
if (TYPE(key) == T_SYMBOL) {
md_ary->metadata[md_ary->count].key = (char *)rb_id2name(SYM2ID(key));
} else { /* StringValueCStr does all other type exclusions for us */
md_ary->metadata[md_ary->count].key = StringValueCStr(key);
} }
bfr = grpc_rb_get_wrapped_byte_buffer(byte_buffer); md_ary->metadata[md_ary->count].value = RSTRING_PTR(rb_ary_entry(val, i));
Data_Get_Struct(self, grpc_call, call); md_ary->metadata[md_ary->count].value_length =
err = grpc_call_start_write_old(call, bfr, ROBJECT(tag), NUM2UINT(flags)); RSTRING_LEN(rb_ary_entry(val, i));
if (err != GRPC_CALL_OK) { md_ary->count += 1;
rb_raise(rb_eCallError, "start write failed: %s (code=%d)", }
grpc_call_error_detail_of(err), err); } else {
if (TYPE(key) == T_SYMBOL) {
md_ary->metadata[md_ary->count].key = (char *)rb_id2name(SYM2ID(key));
} else { /* StringValueCStr does all other type exclusions for us */
md_ary->metadata[md_ary->count].key = StringValueCStr(key);
}
md_ary->metadata[md_ary->count].value = RSTRING_PTR(val);
md_ary->metadata[md_ary->count].value_length = RSTRING_LEN(val);
md_ary->count += 1;
} }
return Qnil; return ST_CONTINUE;
} }
/* Queue a status for writing. /* grpc_rb_md_ary_capacity_hash_cb is the hash iteration callback used
to pre-compute the capacity of a grpc_metadata_array.
*/
int grpc_rb_md_ary_capacity_hash_cb(VALUE key, VALUE val, VALUE md_ary_obj) {
grpc_metadata_array *md_ary = NULL;
call-seq: /* Construct a metadata object from key and value and add it */
tag = Object.new Data_Get_Struct(md_ary_obj, grpc_metadata_array, md_ary);
call.write_status(200, "OK", tag)
REQUIRES: No other writes are pending on the call. It is only safe to
start the next write after the corresponding write_accepted event
is received.
GRPC_INVOKE_ACCEPTED must have been received by the application
prior to calling this.
Only callable on the server.
Produces a GRPC_FINISHED event when the status is sent and the stream is
fully closed */
static VALUE grpc_rb_call_start_write_status(VALUE self, VALUE code,
VALUE status, VALUE tag) {
grpc_call *call = NULL;
grpc_call_error err;
Data_Get_Struct(self, grpc_call, call);
err = grpc_call_start_write_status_old(call, NUM2UINT(code),
StringValueCStr(status), ROBJECT(tag));
if (err != GRPC_CALL_OK) {
rb_raise(rb_eCallError, "start write status: %s (code=%d)",
grpc_call_error_detail_of(err), err);
}
return Qnil; if (TYPE(val) == T_ARRAY) {
/* If the value is an array, add capacity for each value in the array */
md_ary->capacity += RARRAY_LEN(val);
} else {
md_ary->capacity += 1;
}
return ST_CONTINUE;
} }
/* No more messages to send. /* grpc_rb_md_ary_convert converts a ruby metadata hash into
REQUIRES: No other writes are pending on the call. */ a grpc_metadata_array.
static VALUE grpc_rb_call_writes_done(VALUE self, VALUE tag) { */
grpc_call *call = NULL; void grpc_rb_md_ary_convert(VALUE md_ary_hash, grpc_metadata_array *md_ary) {
grpc_call_error err; VALUE md_ary_obj = Qnil;
Data_Get_Struct(self, grpc_call, call); if (md_ary_hash == Qnil) {
err = grpc_call_writes_done_old(call, ROBJECT(tag)); return; /* Do nothing if the expected hash value is nil */
if (err != GRPC_CALL_OK) { }
rb_raise(rb_eCallError, "writes done: %s (code=%d)", if (TYPE(md_ary_hash) != T_HASH) {
grpc_call_error_detail_of(err), err); rb_raise(rb_eTypeError, "md_ary_convert: got <%s>, want <Hash>",
rb_obj_classname(md_ary_hash));
return;
} }
return Qnil; /* Initialize the array, compute its capacity, then fill it. */
grpc_metadata_array_init(md_ary);
md_ary_obj = Data_Wrap_Struct(rb_cMdAry, GC_NOT_MARKED, GC_DONT_FREE, md_ary);
rb_hash_foreach(md_ary_hash, grpc_rb_md_ary_capacity_hash_cb, md_ary_obj);
md_ary->metadata = gpr_malloc(md_ary->capacity * sizeof(grpc_metadata));
rb_hash_foreach(md_ary_hash, grpc_rb_md_ary_fill_hash_cb, md_ary_obj);
}
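For orientation, a sketch of the metadata hash shapes this conversion accepts, as implied by the two callbacks above; the keys shown here are invented:

    md = {
      :'x-simple'  => 'one value',            # Symbol key, single String value
      'x-repeated' => ['first', 'second'],    # repeated key: one entry per element
      'x-string'   => 'another value'         # String keys work the same way
    }
    # a hash of this shape is what SEND_INITIAL_METADATA (and the trailing
    # metadata of a status) ultimately feeds through grpc_rb_md_ary_convert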
/* Converts a metadata array to a hash. */
VALUE grpc_rb_md_ary_to_h(grpc_metadata_array *md_ary) {
VALUE key = Qnil;
VALUE new_ary = Qnil;
VALUE value = Qnil;
VALUE result = rb_hash_new();
size_t i;
for (i = 0; i < md_ary->count; i++) {
key = rb_str_new2(md_ary->metadata[i].key);
value = rb_hash_aref(result, key);
if (value == Qnil) {
value = rb_str_new(md_ary->metadata[i].value,
md_ary->metadata[i].value_length);
rb_hash_aset(result, key, value);
} else if (TYPE(value) == T_ARRAY) {
/* Add the string to the returned array */
rb_ary_push(value,
rb_str_new(md_ary->metadata[i].value,
md_ary->metadata[i].value_length));
} else {
/* Add the current value with this key and the new one to an array */
new_ary = rb_ary_new();
rb_ary_push(new_ary, value);
rb_ary_push(new_ary,
rb_str_new(md_ary->metadata[i].value,
md_ary->metadata[i].value_length));
rb_hash_aset(result, key, new_ary);
}
}
return result;
} }
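Going the other way, grpc_rb_md_ary_to_h folds repeated keys into Arrays, so received metadata comes back in roughly this shape (keys invented for illustration):

    received_md = {
      'x-single'   => 'only-value',                   # key seen once: a String
      'x-repeated' => ['first', 'second', 'third']    # key seen repeatedly: an Array
    }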
/* call-seq: /* grpc_rb_call_check_op_keys_hash_cb is a hash iteration func that checks
call.server_end_initial_metadata(flag) each key of an ops hash is valid.
*/
Only to be called on servers, before sending messages. int grpc_rb_call_check_op_keys_hash_cb(VALUE key, VALUE val, VALUE ops_ary) {
flags is a bit-field combination of the write flags defined above. /* Update the capacity; the value is an array, add capacity for each value in
* the array */
REQUIRES: Can be called at most once per call. if (TYPE(key) != T_FIXNUM) {
Can only be called on the server, must be called after rb_raise(rb_eTypeError, "invalid operation : got <%s>, want <Fixnum>",
grpc_call_server_accept rb_obj_classname(key));
Produces no events */ return ST_STOP;
static VALUE grpc_rb_call_server_end_initial_metadata(int argc, VALUE *argv, }
VALUE self) { switch(NUM2INT(key)) {
VALUE flags = Qnil; case GRPC_OP_SEND_INITIAL_METADATA:
grpc_call *call = NULL; case GRPC_OP_SEND_MESSAGE:
grpc_call_error err; case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
case GRPC_OP_SEND_STATUS_FROM_SERVER:
case GRPC_OP_RECV_INITIAL_METADATA:
case GRPC_OP_RECV_MESSAGE:
case GRPC_OP_RECV_STATUS_ON_CLIENT:
case GRPC_OP_RECV_CLOSE_ON_SERVER:
rb_ary_push(ops_ary, key);
return ST_CONTINUE;
default:
rb_raise(rb_eTypeError, "invalid operation : bad value %d",
NUM2INT(key));
};
return ST_STOP;
}
/* "01" == 1 (flags) is optional */ /* grpc_rb_op_update_status_from_server adds the values in a ruby status
rb_scan_args(argc, argv, "01", &flags); struct to the 'send_status_from_server' portion of an op.
if (NIL_P(flags)) { */
flags = UINT2NUM(0); /* Default to no flags */ void grpc_rb_op_update_status_from_server(grpc_op *op,
grpc_metadata_array* md_ary,
VALUE status) {
VALUE code = rb_struct_aref(status, sym_code);
VALUE details = rb_struct_aref(status, sym_details);
VALUE metadata_hash = rb_struct_aref(status, sym_metadata);
/* TODO: add check to ensure status is the correct struct type */
if (TYPE(code) != T_FIXNUM) {
rb_raise(rb_eTypeError, "invalid code : got <%s>, want <Fixnum>",
rb_obj_classname(code));
return;
} }
Data_Get_Struct(self, grpc_call, call); if (TYPE(details) != T_STRING) {
err = grpc_call_server_end_initial_metadata_old(call, NUM2UINT(flags)); rb_raise(rb_eTypeError, "invalid details : got <%s>, want <String>",
if (err != GRPC_CALL_OK) { rb_obj_classname(details));
rb_raise(rb_eCallError, "end_initial_metadata failed: %s (code=%d)", return;
grpc_call_error_detail_of(err), err);
} }
return Qnil; op->data.send_status_from_server.status = NUM2INT(code);
op->data.send_status_from_server.status_details = StringValueCStr(details);
grpc_rb_md_ary_convert(metadata_hash, md_ary);
op->data.send_status_from_server.trailing_metadata_count = md_ary->count;
op->data.send_status_from_server.trailing_metadata = md_ary->metadata;
}
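The value paired with the send-status op is a status struct carrying a code (Fixnum), details (String) and optional trailing metadata. A hedged sketch, assuming the Status struct registered elsewhere in the extension is reachable as Struct::Status and that GRPC::Core::StatusCodes is loaded:

    status = Struct::Status.new(
      GRPC::Core::StatusCodes::OK,     # code: must be a Fixnum
      'OK',                            # details: must be a String
      'x-trailer' => 'done'            # trailing metadata Hash (may be nil)
    )
    ops = { GRPC::Core::CallOps::SEND_STATUS_FROM_SERVER => status }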
/* run_batch_stack holds various values used by the
* grpc_rb_call_run_batch function */
typedef struct run_batch_stack {
/* The batch ops */
grpc_op ops[8]; /* 8 is the maximum number of operations */
size_t op_num; /* tracks the last added operation */
/* Data being sent */
grpc_metadata_array send_metadata;
grpc_metadata_array send_trailing_metadata;
/* Data being received */
grpc_byte_buffer *recv_message;
grpc_metadata_array recv_metadata;
grpc_metadata_array recv_trailing_metadata;
int recv_cancelled;
grpc_status_code recv_status;
char *recv_status_details;
size_t recv_status_details_capacity;
} run_batch_stack;
/* grpc_run_batch_stack_init ensures the run_batch_stack is properly
* initialized */
static void grpc_run_batch_stack_init(run_batch_stack* st) {
MEMZERO(st, run_batch_stack, 1);
grpc_metadata_array_init(&st->send_metadata);
grpc_metadata_array_init(&st->send_trailing_metadata);
grpc_metadata_array_init(&st->recv_metadata);
grpc_metadata_array_init(&st->recv_trailing_metadata);
st->op_num = 0;
}
/* grpc_run_batch_stack_cleanup ensures the run_batch_stack is properly
* cleaned up */
static void grpc_run_batch_stack_cleanup(run_batch_stack* st) {
grpc_metadata_array_destroy(&st->send_metadata);
grpc_metadata_array_destroy(&st->send_trailing_metadata);
grpc_metadata_array_destroy(&st->recv_metadata);
grpc_metadata_array_destroy(&st->recv_trailing_metadata);
if (st->recv_status_details != NULL) {
gpr_free(st->recv_status_details);
}
}
/* grpc_run_batch_stack_fill_ops fills the run_batch_stack ops array from
* ops_hash */
static void grpc_run_batch_stack_fill_ops(run_batch_stack* st, VALUE ops_hash) {
VALUE this_op = Qnil;
VALUE this_value = Qnil;
VALUE ops_ary = rb_ary_new();
size_t i = 0;
/* Create a ruby array with just the operation keys */
rb_hash_foreach(ops_hash, grpc_rb_call_check_op_keys_hash_cb, ops_ary);
/* Fill the ops array */
for (i = 0; i < (size_t)RARRAY_LEN(ops_ary); i++) {
this_op = rb_ary_entry(ops_ary, i);
this_value = rb_hash_aref(ops_hash, this_op);
switch(NUM2INT(this_op)) {
case GRPC_OP_SEND_INITIAL_METADATA:
/* N.B. later there is no need to explicitly delete the metadata keys
* and values, they are references to data in ruby objects. */
grpc_rb_md_ary_convert(this_value, &st->send_metadata);
st->ops[st->op_num].data.send_initial_metadata.count =
st->send_metadata.count;
st->ops[st->op_num].data.send_initial_metadata.metadata =
st->send_metadata.metadata;
break;
case GRPC_OP_SEND_MESSAGE:
st->ops[st->op_num].data.send_message =
grpc_rb_s_to_byte_buffer(RSTRING_PTR(this_value),
RSTRING_LEN(this_value));
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
/* N.B. later there is no need to explicitly delete the metadata keys
* and values, they are references to data in ruby objects. */
grpc_rb_op_update_status_from_server(&st->ops[st->op_num],
&st->send_trailing_metadata,
this_value);
break;
case GRPC_OP_RECV_INITIAL_METADATA:
st->ops[st->op_num].data.recv_initial_metadata = &st->recv_metadata;
break;
case GRPC_OP_RECV_MESSAGE:
st->ops[st->op_num].data.recv_message = &st->recv_message;
break;
case GRPC_OP_RECV_STATUS_ON_CLIENT:
st->ops[st->op_num].data.recv_status_on_client.trailing_metadata =
&st->recv_trailing_metadata;
st->ops[st->op_num].data.recv_status_on_client.status =
&st->recv_status;
st->ops[st->op_num].data.recv_status_on_client.status_details =
&st->recv_status_details;
st->ops[st->op_num].data.recv_status_on_client.status_details_capacity =
&st->recv_status_details_capacity;
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
st->ops[st->op_num].data.recv_close_on_server.cancelled =
&st->recv_cancelled;
break;
default:
grpc_run_batch_stack_cleanup(st);
rb_raise(rb_eTypeError, "invalid operation : bad value %d",
NUM2INT(this_op));
};
st->ops[st->op_num].op = (grpc_op_type)NUM2INT(this_op);
st->op_num++;
}
}
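Summarising the switch above, each op key expects a differently typed value; a sketch of the pairings (a real batch would use only a client- or server-appropriate subset):

    ops = {
      GRPC::Core::CallOps::SEND_INITIAL_METADATA  => { 'k' => 'v' },  # metadata Hash
      GRPC::Core::CallOps::SEND_MESSAGE           => 'raw bytes',     # payload String
      GRPC::Core::CallOps::SEND_CLOSE_FROM_CLIENT => nil,             # no payload
      GRPC::Core::CallOps::RECV_INITIAL_METADATA  => nil,             # receive ops take no input
      GRPC::Core::CallOps::RECV_MESSAGE           => nil,
      GRPC::Core::CallOps::RECV_STATUS_ON_CLIENT  => nil
    }
    # SEND_STATUS_FROM_SERVER takes the Status struct shown above;
    # RECV_CLOSE_ON_SERVER, like the other receive ops, takes nil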
/* grpc_run_batch_stack_build_result constructs a ruby BatchResult struct
after the batch has run */
static VALUE grpc_run_batch_stack_build_result(run_batch_stack* st) {
size_t i = 0;
VALUE result = rb_struct_new(rb_sBatchResult, Qnil, Qnil, Qnil, Qnil, Qnil,
Qnil, Qnil, Qnil, NULL);
for (i = 0; i < st->op_num; i++) {
switch(st->ops[i].op) {
case GRPC_OP_SEND_INITIAL_METADATA:
rb_struct_aset(result, sym_send_metadata, Qtrue);
break;
case GRPC_OP_SEND_MESSAGE:
rb_struct_aset(result, sym_send_message, Qtrue);
grpc_byte_buffer_destroy(st->ops[i].data.send_message);
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
rb_struct_aset(result, sym_send_close, Qtrue);
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
rb_struct_aset(result, sym_send_status, Qtrue);
break;
case GRPC_OP_RECV_INITIAL_METADATA:
rb_struct_aset(result, sym_metadata,
grpc_rb_md_ary_to_h(&st->recv_metadata));
break;
case GRPC_OP_RECV_MESSAGE:
rb_struct_aset(result, sym_message,
grpc_rb_byte_buffer_to_s(st->recv_message));
break;
case GRPC_OP_RECV_STATUS_ON_CLIENT:
rb_struct_aset(
result,
sym_status,
rb_struct_new(rb_sStatus,
UINT2NUM(st->recv_status),
(st->recv_status_details == NULL
? Qnil
: rb_str_new2(st->recv_status_details)),
grpc_rb_md_ary_to_h(&st->recv_trailing_metadata),
NULL));
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
rb_struct_aset(result, sym_cancelled,
st->recv_cancelled == 0 ? Qfalse : Qtrue);
break;
default:
break;
}
}
return result;
} }
/* call-seq: /* call-seq:
call.server_accept(completion_queue, finished_tag) cq = CompletionQueue.new
ops = {
Accept an incoming RPC, binding a completion queue to it. GRPC::Core::CallOps::SEND_INITIAL_METADATA => <op_value>,
To be called before sending or receiving messages. GRPC::Core::CallOps::SEND_MESSAGE => <op_value>,
...
REQUIRES: Can be called at most once per call. }
Can only be called on the server. tag = Object.new
Produces a GRPC_FINISHED event with finished_tag when the call has been timeout = 10
completed (there may be other events for the call pending at this call.start_batch(cqueue, tag, timeout, ops)
time) */
static VALUE grpc_rb_call_server_accept(VALUE self, VALUE cqueue, Start a batch of operations defined in the array ops; when complete, post a
VALUE finished_tag) { completion of type 'tag' to the completion queue bound to the call.
Also waits for the batch to complete, until timeout is reached.
The order of ops specified in the batch has no significance.
Only one operation of each type can be active at once in any given
batch */
static VALUE grpc_rb_call_run_batch(VALUE self, VALUE cqueue, VALUE tag,
VALUE timeout, VALUE ops_hash) {
run_batch_stack st;
grpc_call *call = NULL; grpc_call *call = NULL;
grpc_completion_queue *cq = grpc_rb_get_wrapped_completion_queue(cqueue); grpc_event *ev = NULL;
grpc_call_error err; grpc_call_error err;
VALUE result = Qnil;
Data_Get_Struct(self, grpc_call, call); Data_Get_Struct(self, grpc_call, call);
err = grpc_call_server_accept_old(call, cq, ROBJECT(finished_tag));
/* Validate the ops args, adding them to a ruby array */
if (TYPE(ops_hash) != T_HASH) {
rb_raise(rb_eTypeError, "call#run_batch: ops hash should be a hash");
return Qnil;
}
grpc_run_batch_stack_init(&st);
grpc_run_batch_stack_fill_ops(&st, ops_hash);
/* call grpc_call_start_batch, then wait for it to complete using
* pluck_event */
err = grpc_call_start_batch(call, st.ops, st.op_num, ROBJECT(tag));
if (err != GRPC_CALL_OK) { if (err != GRPC_CALL_OK) {
rb_raise(rb_eCallError, "server_accept failed: %s (code=%d)", grpc_run_batch_stack_cleanup(&st);
rb_raise(rb_eCallError, "grpc_call_start_batch failed with %s (code=%d)",
grpc_call_error_detail_of(err), err); grpc_call_error_detail_of(err), err);
return;
}
ev = grpc_rb_completion_queue_pluck_event(cqueue, tag, timeout);
if (ev == NULL) {
grpc_run_batch_stack_cleanup(&st);
rb_raise(rb_eOutOfTime, "grpc_call_start_batch timed out");
return;
}
if (ev->data.op_complete != GRPC_OP_OK) {
grpc_run_batch_stack_cleanup(&st);
rb_raise(rb_eCallError, "start_batch completion failed, (code=%d)",
ev->data.op_complete);
return;
} }
/* Add the completion queue as an instance attribute, prevents it from being /* Build and return the BatchResult struct result */
* GCed until this call object is GCed */ result = grpc_run_batch_stack_build_result(&st);
rb_ivar_set(self, id_cq, cqueue); grpc_run_batch_stack_cleanup(&st);
return Qnil; return result;
} }
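Putting the pieces together, a rough client-side sketch of driving a call with run_batch; the channel constructor arguments, method path and deadline values are assumptions, not taken from this diff (CallOps abbreviates GRPC::Core::CallOps via the include):

    require 'grpc'
    include GRPC::Core

    cq = CompletionQueue.new
    ch = Channel.new('localhost:50051', nil)     # constructor args assumed
    deadline = Time.now + 10
    call = ch.create_call(cq, '/some.Service/SomeMethod',
                          'localhost:50051', deadline)

    # send the request in one batch ...
    call.run_batch(cq, Object.new, deadline,
                   CallOps::SEND_INITIAL_METADATA  => {},
                   CallOps::SEND_MESSAGE           => 'request-bytes',
                   CallOps::SEND_CLOSE_FROM_CLIENT => nil)

    # ... then collect the response in another
    batch = call.run_batch(cq, Object.new, deadline,
                           CallOps::RECV_INITIAL_METADATA => nil,
                           CallOps::RECV_MESSAGE          => nil,
                           CallOps::RECV_STATUS_ON_CLIENT => nil)
    batch.metadata   # initial metadata Hash
    batch.message    # response payload as a String
    batch.status     # Status struct: code, details, trailing metadata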
/* rb_cCall is the ruby class that proxies grpc_call. */ /* rb_cCall is the ruby class that proxies grpc_call. */
@@ -449,6 +586,10 @@ VALUE rb_cCall = Qnil;
operations; */ operations; */
VALUE rb_eCallError = Qnil; VALUE rb_eCallError = Qnil;
/* rb_eOutOfTime is the ruby class of the exception thrown to indicate
a timeout. */
VALUE rb_eOutOfTime = Qnil;
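Both exception classes inherit from Exception rather than StandardError, so a bare rescue will not catch them; continuing the client sketch above:

    begin
      call.run_batch(cq, Object.new, deadline,
                     CallOps::RECV_STATUS_ON_CLIENT => nil)
    rescue GRPC::Core::CallError => e
      # misuse of the call (bad op key, start_batch rejected); not recoverable
    rescue GRPC::Core::OutOfTime
      # the batch did not complete before the deadline passed
    end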
void Init_grpc_error_codes() { void Init_grpc_error_codes() {
/* Constants representing the error codes of grpc_call_error in grpc.h */ /* Constants representing the error codes of grpc_call_error in grpc.h */
VALUE rb_RpcErrors = rb_define_module_under(rb_mGrpcCore, "RpcErrors"); VALUE rb_RpcErrors = rb_define_module_under(rb_mGrpcCore, "RpcErrors");
@@ -500,11 +641,35 @@ void Init_grpc_error_codes() {
rb_obj_freeze(rb_error_code_details); rb_obj_freeze(rb_error_code_details);
} }
void Init_grpc_op_codes() {
/* Constants representing operation type codes in grpc.h */
VALUE rb_CallOps = rb_define_module_under(rb_mGrpcCore, "CallOps");
rb_define_const(rb_CallOps, "SEND_INITIAL_METADATA",
UINT2NUM(GRPC_OP_SEND_INITIAL_METADATA));
rb_define_const(rb_CallOps, "SEND_MESSAGE", UINT2NUM(GRPC_OP_SEND_MESSAGE));
rb_define_const(rb_CallOps, "SEND_CLOSE_FROM_CLIENT",
UINT2NUM(GRPC_OP_SEND_CLOSE_FROM_CLIENT));
rb_define_const(rb_CallOps, "SEND_STATUS_FROM_SERVER",
UINT2NUM(GRPC_OP_SEND_STATUS_FROM_SERVER));
rb_define_const(rb_CallOps, "RECV_INITIAL_METADATA",
UINT2NUM(GRPC_OP_RECV_INITIAL_METADATA));
rb_define_const(rb_CallOps, "RECV_MESSAGE",
UINT2NUM(GRPC_OP_RECV_MESSAGE));
rb_define_const(rb_CallOps, "RECV_STATUS_ON_CLIENT",
UINT2NUM(GRPC_OP_RECV_STATUS_ON_CLIENT));
rb_define_const(rb_CallOps, "RECV_CLOSE_ON_SERVER",
UINT2NUM(GRPC_OP_RECV_CLOSE_ON_SERVER));
}
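From Ruby the new operation codes are visible as constants on the CallOps module; a quick check, assuming nothing else defines constants there:

    require 'grpc'
    GRPC::Core::CallOps.constants.sort
    # expected members (as defined above):
    # [:RECV_CLOSE_ON_SERVER, :RECV_INITIAL_METADATA, :RECV_MESSAGE,
    #  :RECV_STATUS_ON_CLIENT, :SEND_CLOSE_FROM_CLIENT, :SEND_INITIAL_METADATA,
    #  :SEND_MESSAGE, :SEND_STATUS_FROM_SERVER]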
void Init_grpc_call() { void Init_grpc_call() {
/* CallError inherits from Exception to signal that it is non-recoverable */ /* CallError inherits from Exception to signal that it is non-recoverable */
rb_eCallError = rb_eCallError =
rb_define_class_under(rb_mGrpcCore, "CallError", rb_eException); rb_define_class_under(rb_mGrpcCore, "CallError", rb_eException);
rb_eOutOfTime =
rb_define_class_under(rb_mGrpcCore, "OutOfTime", rb_eException);
rb_cCall = rb_define_class_under(rb_mGrpcCore, "Call", rb_cObject); rb_cCall = rb_define_class_under(rb_mGrpcCore, "Call", rb_cObject);
rb_cMdAry = rb_define_class_under(rb_mGrpcCore, "MetadataArray",
rb_cObject);
/* Prevent allocation or initialization of the Call class */ /* Prevent allocation or initialization of the Call class */
rb_define_alloc_func(rb_cCall, grpc_rb_cannot_alloc); rb_define_alloc_func(rb_cCall, grpc_rb_cannot_alloc);
@@ -512,17 +677,8 @@ void Init_grpc_call() {
rb_define_method(rb_cCall, "initialize_copy", grpc_rb_cannot_init_copy, 1); rb_define_method(rb_cCall, "initialize_copy", grpc_rb_cannot_init_copy, 1);
/* Add ruby analogues of the Call methods. */ /* Add ruby analogues of the Call methods. */
rb_define_method(rb_cCall, "server_accept", grpc_rb_call_server_accept, 2); rb_define_method(rb_cCall, "run_batch", grpc_rb_call_run_batch, 4);
rb_define_method(rb_cCall, "server_end_initial_metadata",
grpc_rb_call_server_end_initial_metadata, -1);
rb_define_method(rb_cCall, "add_metadata", grpc_rb_call_add_metadata, -1);
rb_define_method(rb_cCall, "cancel", grpc_rb_call_cancel, 0); rb_define_method(rb_cCall, "cancel", grpc_rb_call_cancel, 0);
rb_define_method(rb_cCall, "invoke", grpc_rb_call_invoke, -1);
rb_define_method(rb_cCall, "start_read", grpc_rb_call_start_read, 1);
rb_define_method(rb_cCall, "start_write", grpc_rb_call_start_write, -1);
rb_define_method(rb_cCall, "start_write_status",
grpc_rb_call_start_write_status, 3);
rb_define_method(rb_cCall, "writes_done", grpc_rb_call_writes_done, 1);
rb_define_method(rb_cCall, "status", grpc_rb_call_get_status, 0); rb_define_method(rb_cCall, "status", grpc_rb_call_get_status, 0);
rb_define_method(rb_cCall, "status=", grpc_rb_call_set_status, 1); rb_define_method(rb_cCall, "status=", grpc_rb_call_set_status, 1);
rb_define_method(rb_cCall, "metadata", grpc_rb_call_get_metadata, 0); rb_define_method(rb_cCall, "metadata", grpc_rb_call_get_metadata, 0);
@@ -537,12 +693,35 @@ void Init_grpc_call() {
id_flags = rb_intern("__flags"); id_flags = rb_intern("__flags");
id_input_md = rb_intern("__input_md"); id_input_md = rb_intern("__input_md");
/* Ids used in constructing the batch result. */
sym_send_message = ID2SYM(rb_intern("send_message"));
sym_send_metadata = ID2SYM(rb_intern("send_metadata"));
sym_send_close = ID2SYM(rb_intern("send_close"));
sym_send_status = ID2SYM(rb_intern("send_status"));
sym_message = ID2SYM(rb_intern("message"));
sym_status = ID2SYM(rb_intern("status"));
sym_cancelled = ID2SYM(rb_intern("cancelled"));
/* The Struct used to return the run_batch result. */
rb_sBatchResult = rb_struct_define(
"BatchResult",
"send_message",
"send_metadata",
"send_close",
"send_status",
"message",
"metadata",
"status",
"cancelled",
NULL);
/* The hash for reference counting calls, to ensure they can't be destroyed /* The hash for reference counting calls, to ensure they can't be destroyed
* more than once */ * more than once */
hash_all_calls = rb_hash_new(); hash_all_calls = rb_hash_new();
rb_define_const(rb_cCall, "INTERNAL_ALL_CALLs", hash_all_calls); rb_define_const(rb_cCall, "INTERNAL_ALL_CALLs", hash_all_calls);
Init_grpc_error_codes(); Init_grpc_error_codes();
Init_grpc_op_codes();
} }
/* Gets the call from the ruby object */ /* Gets the call from the ruby object */

src/ruby/ext/grpc/rb_call.h
@@ -46,13 +46,20 @@ VALUE grpc_rb_wrap_call(grpc_call* c);
/* Provides the details of an call error */ /* Provides the details of an call error */
const char* grpc_call_error_detail_of(grpc_call_error err); const char* grpc_call_error_detail_of(grpc_call_error err);
/* Converts a metadata array to a hash. */
VALUE grpc_rb_md_ary_to_h(grpc_metadata_array *md_ary);
/* rb_cCall is the Call class whose instances proxy grpc_call. */ /* rb_cCall is the Call class whose instances proxy grpc_call. */
extern VALUE rb_cCall; extern VALUE rb_cCall;
/* rb_cCallError is the ruby class of the exception thrown during call /* rb_eCallError is the ruby class of the exception thrown during call
operations. */ operations. */
extern VALUE rb_eCallError; extern VALUE rb_eCallError;
/* rb_eOutOfTime is the ruby class of the exception thrown to indicate
a timeout. */
extern VALUE rb_eOutOfTime;
/* Initializes the Call class. */ /* Initializes the Call class. */
void Init_grpc_call(); void Init_grpc_call();

src/ruby/ext/grpc/rb_channel.c
@@ -49,10 +49,16 @@
static ID id_channel; static ID id_channel;
/* id_target is the name of the hidden ivar that preserves a reference to the /* id_target is the name of the hidden ivar that preserves a reference to the
* target string used to create the call, preserved so that is does not get * target string used to create the call, preserved so that it does not get
* GCed before the channel */ * GCed before the channel */
static ID id_target; static ID id_target;
/* id_cqueue is the name of the hidden ivar that preserves a reference to the
* completion queue used to create the call, preserved so that it does not get
* GCed before the channel */
static ID id_cqueue;
/* Used during the conversion of a hash to channel args during channel setup */ /* Used during the conversion of a hash to channel args during channel setup */
static VALUE rb_cChannelArgs; static VALUE rb_cChannelArgs;
@@ -142,6 +148,7 @@ static VALUE grpc_rb_channel_init(int argc, VALUE *argv, VALUE self) {
if (ch == NULL) { if (ch == NULL) {
rb_raise(rb_eRuntimeError, "could not create an rpc channel to target:%s", rb_raise(rb_eRuntimeError, "could not create an rpc channel to target:%s",
target_chars); target_chars);
return Qnil;
} }
rb_ivar_set(self, id_target, target); rb_ivar_set(self, id_target, target);
wrapper->wrapped = ch; wrapper->wrapped = ch;
@@ -164,6 +171,7 @@ static VALUE grpc_rb_channel_init_copy(VALUE copy, VALUE orig) {
if (TYPE(orig) != T_DATA || if (TYPE(orig) != T_DATA ||
RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_channel_free) { RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_channel_free) {
rb_raise(rb_eTypeError, "not a %s", rb_obj_classname(rb_cChannel)); rb_raise(rb_eTypeError, "not a %s", rb_obj_classname(rb_cChannel));
return Qnil;
} }
Data_Get_Struct(orig, grpc_rb_channel, orig_ch); Data_Get_Struct(orig, grpc_rb_channel, orig_ch);
@@ -177,34 +185,42 @@ static VALUE grpc_rb_channel_init_copy(VALUE copy, VALUE orig) {
/* Create a call given a grpc_channel, in order to call method. The request /* Create a call given a grpc_channel, in order to call method. The request
is not sent until grpc_call_invoke is called. */ is not sent until grpc_call_invoke is called. */
static VALUE grpc_rb_channel_create_call(VALUE self, VALUE method, VALUE host, static VALUE grpc_rb_channel_create_call(VALUE self, VALUE cqueue, VALUE method,
VALUE deadline) { VALUE host, VALUE deadline) {
VALUE res = Qnil; VALUE res = Qnil;
grpc_rb_channel *wrapper = NULL; grpc_rb_channel *wrapper = NULL;
grpc_channel *ch = NULL;
grpc_call *call = NULL; grpc_call *call = NULL;
grpc_channel *ch = NULL;
grpc_completion_queue *cq = NULL;
char *method_chars = StringValueCStr(method); char *method_chars = StringValueCStr(method);
char *host_chars = StringValueCStr(host); char *host_chars = StringValueCStr(host);
cq = grpc_rb_get_wrapped_completion_queue(cqueue);
Data_Get_Struct(self, grpc_rb_channel, wrapper); Data_Get_Struct(self, grpc_rb_channel, wrapper);
ch = wrapper->wrapped; ch = wrapper->wrapped;
if (ch == NULL) { if (ch == NULL) {
rb_raise(rb_eRuntimeError, "closed!"); rb_raise(rb_eRuntimeError, "closed!");
return Qnil;
} }
call = call =
grpc_channel_create_call_old(ch, method_chars, host_chars, grpc_channel_create_call(ch, cq, method_chars, host_chars,
grpc_rb_time_timeval(deadline, grpc_rb_time_timeval(deadline,
/* absolute time */ 0)); /* absolute time */ 0));
if (call == NULL) { if (call == NULL) {
rb_raise(rb_eRuntimeError, "cannot create call with method %s", rb_raise(rb_eRuntimeError, "cannot create call with method %s",
method_chars); method_chars);
return Qnil;
} }
res = grpc_rb_wrap_call(call); res = grpc_rb_wrap_call(call);
/* Make this channel an instance attribute of the call so that is is not GCed /* Make this channel an instance attribute of the call so that it is not GCed
* before the call. */ * before the call. */
rb_ivar_set(res, id_channel, self); rb_ivar_set(res, id_channel, self);
/* Make the completion queue an instance attribute of the call so that it is
* not GCed before the call. */
rb_ivar_set(res, id_cqueue, cqueue);
return res; return res;
} }
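Ruby-side, the completion queue is now the first argument to create_call, and both the channel and the queue are pinned on the returned call so neither is collected before it; a minimal sketch (address, method path and deadline are invented):

    cq = GRPC::Core::CompletionQueue.new
    ch = GRPC::Core::Channel.new('localhost:50051', nil)
    call = ch.create_call(cq, '/pkg.Service/Method', 'localhost:50051',
                          Time.now + 10)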
@@ -240,11 +256,12 @@ void Init_grpc_channel() {
1); 1);
/* Add ruby analogues of the Channel methods. */ /* Add ruby analogues of the Channel methods. */
rb_define_method(rb_cChannel, "create_call", grpc_rb_channel_create_call, 3); rb_define_method(rb_cChannel, "create_call", grpc_rb_channel_create_call, 4);
rb_define_method(rb_cChannel, "destroy", grpc_rb_channel_destroy, 0); rb_define_method(rb_cChannel, "destroy", grpc_rb_channel_destroy, 0);
rb_define_alias(rb_cChannel, "close", "destroy"); rb_define_alias(rb_cChannel, "close", "destroy");
id_channel = rb_intern("__channel"); id_channel = rb_intern("__channel");
id_cqueue = rb_intern("__cqueue");
id_target = rb_intern("__target"); id_target = rb_intern("__target");
rb_define_const(rb_cChannel, "SSL_TARGET", rb_define_const(rb_cChannel, "SSL_TARGET",
ID2SYM(rb_intern(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG))); ID2SYM(rb_intern(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)));

src/ruby/ext/grpc/rb_completion_queue.c
@@ -38,7 +38,6 @@
#include <grpc/grpc.h> #include <grpc/grpc.h>
#include <grpc/support/time.h> #include <grpc/support/time.h>
#include "rb_grpc.h" #include "rb_grpc.h"
#include "rb_event.h"
/* Used to allow grpc_completion_queue_next call to release the GIL */ /* Used to allow grpc_completion_queue_next call to release the GIL */
typedef struct next_call_stack { typedef struct next_call_stack {
@@ -140,7 +139,18 @@ static VALUE grpc_rb_completion_queue_next(VALUE self, VALUE timeout) {
/* Blocks until the next event for given tag is available, and returns the /* Blocks until the next event for given tag is available, and returns the
* event. */ * event. */
static VALUE grpc_rb_completion_queue_pluck(VALUE self, VALUE tag, VALUE grpc_rb_completion_queue_pluck(VALUE self, VALUE tag,
VALUE timeout) {
grpc_event *ev = grpc_rb_completion_queue_pluck_event(self, tag, timeout);
if (ev == NULL) {
return Qnil;
}
return grpc_rb_new_event(ev);
}
/* Blocks until the next event for given tag is available, and returns the
* event. */
grpc_event* grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag,
VALUE timeout) { VALUE timeout) {
next_call_stack next_call; next_call_stack next_call;
MEMZERO(&next_call, next_call_stack, 1); MEMZERO(&next_call, next_call_stack, 1);
@@ -151,9 +161,9 @@ static VALUE grpc_rb_completion_queue_pluck(VALUE self, VALUE tag,
rb_thread_call_without_gvl(grpc_rb_completion_queue_pluck_no_gil, rb_thread_call_without_gvl(grpc_rb_completion_queue_pluck_no_gil,
(void *)&next_call, NULL, NULL); (void *)&next_call, NULL, NULL);
if (next_call.event == NULL) { if (next_call.event == NULL) {
return Qnil; return NULL;
} }
return grpc_rb_new_event(next_call.event); return next_call.event;
} }
/* rb_cCompletionQueue is the ruby class that proxies grpc_completion_queue. */ /* rb_cCompletionQueue is the ruby class that proxies grpc_completion_queue. */

src/ruby/ext/grpc/rb_completion_queue.h
@@ -40,6 +40,14 @@
/* Gets the wrapped completion queue from the ruby wrapper */ /* Gets the wrapped completion queue from the ruby wrapper */
grpc_completion_queue *grpc_rb_get_wrapped_completion_queue(VALUE v); grpc_completion_queue *grpc_rb_get_wrapped_completion_queue(VALUE v);
/**
* Makes the implementation of CompletionQueue#pluck available in other files
*
* This avoids having code that holds the GIL repeated at multiple sites.
*/
grpc_event* grpc_rb_completion_queue_pluck_event(VALUE cqueue, VALUE tag,
VALUE timeout);
/* rb_cCompletionQueue is the CompletionQueue class whose instances proxy /* rb_cCompletionQueue is the CompletionQueue class whose instances proxy
grpc_completion_queue. */ grpc_completion_queue. */
extern VALUE rb_cCompletionQueue; extern VALUE rb_cCompletionQueue;

src/ruby/ext/grpc/rb_event.c (deleted)
@@ -1,361 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "rb_event.h"
#include <ruby.h>
#include <grpc/grpc.h>
#include "rb_grpc.h"
#include "rb_byte_buffer.h"
#include "rb_call.h"
#include "rb_metadata.h"
/* grpc_rb_event wraps a grpc_event. It provides a peer ruby object,
* 'mark' to minimize copying when an event is created from ruby. */
typedef struct grpc_rb_event {
/* Holder of ruby objects involved in constructing the channel */
VALUE mark;
/* The actual event */
grpc_event *wrapped;
} grpc_rb_event;
/* rb_mCompletionType is a ruby module that holds the completion type values */
VALUE rb_mCompletionType = Qnil;
/* Destroys Event instances. */
static void grpc_rb_event_free(void *p) {
grpc_rb_event *ev = NULL;
if (p == NULL) {
return;
};
ev = (grpc_rb_event *)p;
/* Deletes the wrapped object if the mark object is Qnil, which indicates
* that no other object is the actual owner. */
if (ev->wrapped != NULL && ev->mark == Qnil) {
grpc_event_finish(ev->wrapped);
rb_warning("event gc: destroyed the c event");
} else {
rb_warning("event gc: did not destroy the c event");
}
xfree(p);
}
/* Protects the mark object from GC */
static void grpc_rb_event_mark(void *p) {
grpc_rb_event *event = NULL;
if (p == NULL) {
return;
}
event = (grpc_rb_event *)p;
if (event->mark != Qnil) {
rb_gc_mark(event->mark);
}
}
static VALUE grpc_rb_event_result(VALUE self);
/* Obtains the type of an event. */
static VALUE grpc_rb_event_type(VALUE self) {
grpc_event *event = NULL;
grpc_rb_event *wrapper = NULL;
Data_Get_Struct(self, grpc_rb_event, wrapper);
if (wrapper->wrapped == NULL) {
rb_raise(rb_eRuntimeError, "finished!");
return Qnil;
}
event = wrapper->wrapped;
switch (event->type) {
case GRPC_QUEUE_SHUTDOWN:
return rb_const_get(rb_mCompletionType, rb_intern("QUEUE_SHUTDOWN"));
case GRPC_READ:
return rb_const_get(rb_mCompletionType, rb_intern("READ"));
case GRPC_WRITE_ACCEPTED:
grpc_rb_event_result(self); /* validates the result */
return rb_const_get(rb_mCompletionType, rb_intern("WRITE_ACCEPTED"));
case GRPC_FINISH_ACCEPTED:
grpc_rb_event_result(self); /* validates the result */
return rb_const_get(rb_mCompletionType, rb_intern("FINISH_ACCEPTED"));
case GRPC_CLIENT_METADATA_READ:
return rb_const_get(rb_mCompletionType,
rb_intern("CLIENT_METADATA_READ"));
case GRPC_FINISHED:
return rb_const_get(rb_mCompletionType, rb_intern("FINISHED"));
case GRPC_SERVER_RPC_NEW:
return rb_const_get(rb_mCompletionType, rb_intern("SERVER_RPC_NEW"));
default:
rb_raise(rb_eRuntimeError, "unrecognized event code for an rpc event:%d",
event->type);
}
return Qnil; /* should not be reached */
}
/* Obtains the tag associated with an event. */
static VALUE grpc_rb_event_tag(VALUE self) {
grpc_event *event = NULL;
grpc_rb_event *wrapper = NULL;
Data_Get_Struct(self, grpc_rb_event, wrapper);
if (wrapper->wrapped == NULL) {
rb_raise(rb_eRuntimeError, "finished!");
return Qnil;
}
event = wrapper->wrapped;
if (event->tag == NULL) {
return Qnil;
}
return (VALUE)event->tag;
}
/* Obtains the call associated with an event. */
static VALUE grpc_rb_event_call(VALUE self) {
grpc_event *event = NULL;
grpc_rb_event *wrapper = NULL;
Data_Get_Struct(self, grpc_rb_event, wrapper);
if (wrapper->wrapped == NULL) {
rb_raise(rb_eRuntimeError, "finished!");
return Qnil;
}
event = wrapper->wrapped;
if (event->call != NULL) {
return grpc_rb_wrap_call(event->call);
}
return Qnil;
}
/* Obtains the metadata associated with an event. */
static VALUE grpc_rb_event_metadata(VALUE self) {
grpc_event *event = NULL;
grpc_rb_event *wrapper = NULL;
grpc_metadata *metadata = NULL;
VALUE key = Qnil;
VALUE new_ary = Qnil;
VALUE result = Qnil;
VALUE value = Qnil;
size_t count = 0;
size_t i = 0;
Data_Get_Struct(self, grpc_rb_event, wrapper);
if (wrapper->wrapped == NULL) {
rb_raise(rb_eRuntimeError, "finished!");
return Qnil;
}
/* Figure out which metadata to read. */
event = wrapper->wrapped;
switch (event->type) {
case GRPC_CLIENT_METADATA_READ:
count = event->data.client_metadata_read.count;
metadata = event->data.client_metadata_read.elements;
break;
case GRPC_FINISHED:
count = event->data.finished.metadata_count;
metadata = event->data.finished.metadata_elements;
break;
case GRPC_SERVER_RPC_NEW:
count = event->data.server_rpc_new.metadata_count;
metadata = event->data.server_rpc_new.metadata_elements;
break;
default:
rb_raise(rb_eRuntimeError,
"bug: bad event type metadata. got %d; want %d|%d:%d",
event->type, GRPC_CLIENT_METADATA_READ, GRPC_FINISHED,
GRPC_SERVER_RPC_NEW);
return Qnil;
}
result = rb_hash_new();
for (i = 0; i < count; i++) {
key = rb_str_new2(metadata[i].key);
value = rb_hash_aref(result, key);
if (value == Qnil) {
value = rb_str_new(metadata[i].value, metadata[i].value_length);
rb_hash_aset(result, key, value);
} else if (TYPE(value) == T_ARRAY) {
/* Add the string to the returned array */
rb_ary_push(value,
rb_str_new(metadata[i].value, metadata[i].value_length));
} else {
/* Add the current value with this key and the new one to an array */
new_ary = rb_ary_new();
rb_ary_push(new_ary, value);
rb_ary_push(new_ary,
rb_str_new(metadata[i].value, metadata[i].value_length));
rb_hash_aset(result, key, new_ary);
}
}
return result;
}
/* Obtains the data associated with an event. */
static VALUE grpc_rb_event_result(VALUE self) {
grpc_event *event = NULL;
grpc_rb_event *wrapper = NULL;
Data_Get_Struct(self, grpc_rb_event, wrapper);
if (wrapper->wrapped == NULL) {
rb_raise(rb_eRuntimeError, "finished!");
return Qnil;
}
event = wrapper->wrapped;
switch (event->type) {
case GRPC_QUEUE_SHUTDOWN:
return Qnil;
case GRPC_READ:
return grpc_rb_byte_buffer_create_with_mark(self, event->data.read);
case GRPC_FINISH_ACCEPTED:
if (event->data.finish_accepted == GRPC_OP_OK) {
return Qnil;
}
rb_raise(rb_eEventError, "finish failed, not sure why (code=%d)",
event->data.finish_accepted);
break;
case GRPC_WRITE_ACCEPTED:
if (event->data.write_accepted == GRPC_OP_OK) {
return Qnil;
}
rb_raise(rb_eEventError, "write failed, not sure why (code=%d)",
event->data.write_accepted);
break;
case GRPC_CLIENT_METADATA_READ:
return grpc_rb_event_metadata(self);
case GRPC_FINISHED:
return rb_struct_new(rb_sStatus, UINT2NUM(event->data.finished.status),
(event->data.finished.details == NULL
? Qnil
: rb_str_new2(event->data.finished.details)),
grpc_rb_event_metadata(self), NULL);
break;
case GRPC_SERVER_RPC_NEW:
return rb_struct_new(
rb_sNewServerRpc, rb_str_new2(event->data.server_rpc_new.method),
rb_str_new2(event->data.server_rpc_new.host),
Data_Wrap_Struct(rb_cTimeVal, GC_NOT_MARKED, GC_DONT_FREE,
(void *)&event->data.server_rpc_new.deadline),
grpc_rb_event_metadata(self), NULL);
default:
rb_raise(rb_eRuntimeError, "unrecognized event code for an rpc event:%d",
event->type);
}
return Qfalse;
}
static VALUE grpc_rb_event_finish(VALUE self) {
grpc_event *event = NULL;
grpc_rb_event *wrapper = NULL;
Data_Get_Struct(self, grpc_rb_event, wrapper);
if (wrapper->wrapped == NULL) { /* already closed */
return Qnil;
}
event = wrapper->wrapped;
grpc_event_finish(event);
wrapper->wrapped = NULL;
wrapper->mark = Qnil;
return Qnil;
}
/* rb_cEvent is the Event class whose instances proxy grpc_event */
VALUE rb_cEvent = Qnil;
/* rb_eEventError is the ruby class of the exception thrown on failures during
rpc event processing. */
VALUE rb_eEventError = Qnil;
void Init_grpc_event() {
rb_eEventError =
rb_define_class_under(rb_mGrpcCore, "EventError", rb_eStandardError);
rb_cEvent = rb_define_class_under(rb_mGrpcCore, "Event", rb_cObject);
/* Prevent allocation or inialization from ruby. */
rb_define_alloc_func(rb_cEvent, grpc_rb_cannot_alloc);
rb_define_method(rb_cEvent, "initialize", grpc_rb_cannot_init, 0);
rb_define_method(rb_cEvent, "initialize_copy", grpc_rb_cannot_init_copy, 1);
/* Accessors for the data available in an event. */
rb_define_method(rb_cEvent, "call", grpc_rb_event_call, 0);
rb_define_method(rb_cEvent, "result", grpc_rb_event_result, 0);
rb_define_method(rb_cEvent, "tag", grpc_rb_event_tag, 0);
rb_define_method(rb_cEvent, "type", grpc_rb_event_type, 0);
rb_define_method(rb_cEvent, "finish", grpc_rb_event_finish, 0);
rb_define_alias(rb_cEvent, "close", "finish");
/* Constants representing the completion types */
rb_mCompletionType =
rb_define_module_under(rb_mGrpcCore, "CompletionType");
rb_define_const(rb_mCompletionType, "QUEUE_SHUTDOWN",
INT2NUM(GRPC_QUEUE_SHUTDOWN));
rb_define_const(rb_mCompletionType, "OP_COMPLETE", INT2NUM(GRPC_OP_COMPLETE));
rb_define_const(rb_mCompletionType, "READ", INT2NUM(GRPC_READ));
rb_define_const(rb_mCompletionType, "WRITE_ACCEPTED",
INT2NUM(GRPC_WRITE_ACCEPTED));
rb_define_const(rb_mCompletionType, "FINISH_ACCEPTED",
INT2NUM(GRPC_FINISH_ACCEPTED));
rb_define_const(rb_mCompletionType, "CLIENT_METADATA_READ",
INT2NUM(GRPC_CLIENT_METADATA_READ));
rb_define_const(rb_mCompletionType, "FINISHED", INT2NUM(GRPC_FINISHED));
rb_define_const(rb_mCompletionType, "SERVER_RPC_NEW",
INT2NUM(GRPC_SERVER_RPC_NEW));
rb_define_const(rb_mCompletionType, "SERVER_SHUTDOWN",
INT2NUM(GRPC_SERVER_SHUTDOWN));
rb_define_const(rb_mCompletionType, "RESERVED",
INT2NUM(GRPC_COMPLETION_DO_NOT_USE));
}
VALUE grpc_rb_new_event(grpc_event *ev) {
grpc_rb_event *wrapper = ALLOC(grpc_rb_event);
wrapper->wrapped = ev;
wrapper->mark = Qnil;
return Data_Wrap_Struct(rb_cEvent, grpc_rb_event_mark, grpc_rb_event_free,
wrapper);
}
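With the Event class going away, the per-event result dispatch above is replaced by reading fields straight off the object returned by Core::Call#run_batch. A minimal illustrative sketch of that mapping, assuming `call` and a completion queue `q` already exist, and assuming INFINITE_FUTURE is provided by Core::TimeConsts as the includes elsewhere in this change suggest:

  # Illustrative only: `call` and `q` are assumed to exist; the tag is arbitrary.
  ops = GRPC::Core::CallOps
  tag = Object.new
  batch_result = call.run_batch(q, tag, GRPC::Core::TimeConsts::INFINITE_FUTURE,
                                ops::RECV_INITIAL_METADATA => nil,
                                ops::RECV_MESSAGE => nil,
                                ops::RECV_STATUS_ON_CLIENT => nil)
  batch_result.metadata  # what CLIENT_METADATA_READ events used to carry
  batch_result.message   # what READ events used to carry
  batch_result.status    # what FINISHED events used to carry (code, details, metadata)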

@ -1,53 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_RB_EVENT_H_
#define GRPC_RB_EVENT_H_
#include <ruby.h>
#include <grpc/grpc.h>
/* rb_cEvent is the Event class whose instances proxy grpc_event. */
extern VALUE rb_cEvent;
/* rb_eEventError is the ruby class of the exception thrown during rpc
event processing. */
extern VALUE rb_eEventError;
/* Used to create new ruby event objects */
VALUE grpc_rb_new_event(grpc_event *ev);
/* Initializes the Event and EventError classes. */
void Init_grpc_event();
#endif /* GRPC_RB_EVENT_H_ */

@ -39,12 +39,9 @@
#include <grpc/grpc.h> #include <grpc/grpc.h>
#include <grpc/support/time.h> #include <grpc/support/time.h>
#include "rb_byte_buffer.h"
#include "rb_call.h" #include "rb_call.h"
#include "rb_channel.h" #include "rb_channel.h"
#include "rb_completion_queue.h" #include "rb_completion_queue.h"
#include "rb_event.h"
#include "rb_metadata.h"
#include "rb_server.h" #include "rb_server.h"
#include "rb_credentials.h" #include "rb_credentials.h"
#include "rb_server_credentials.h" #include "rb_server_credentials.h"
@ -195,7 +192,7 @@ static ID id_inspect;
/* id_to_s is the to_s method found on various ruby objects. */ /* id_to_s is the to_s method found on various ruby objects. */
static ID id_to_s; static ID id_to_s;
/* Converts `a wrapped time constant to a standard time. */ /* Converts a wrapped time constant to a standard time. */
VALUE grpc_rb_time_val_to_time(VALUE self) { VALUE grpc_rb_time_val_to_time(VALUE self) {
gpr_timespec *time_const = NULL; gpr_timespec *time_const = NULL;
Data_Get_Struct(self, gpr_timespec, time_const); Data_Get_Struct(self, gpr_timespec, time_const);
@ -257,16 +254,16 @@ void Init_grpc() {
rb_mGRPC = rb_define_module("GRPC"); rb_mGRPC = rb_define_module("GRPC");
rb_mGrpcCore = rb_define_module_under(rb_mGRPC, "Core"); rb_mGrpcCore = rb_define_module_under(rb_mGRPC, "Core");
rb_sNewServerRpc = rb_struct_define("NewServerRpc", "method", "host", rb_sNewServerRpc = rb_struct_define("NewServerRpc", "method", "host",
"deadline", "metadata", NULL); "deadline", "metadata", "call", NULL);
rb_sStatus = rb_struct_define("Status", "code", "details", "metadata", NULL); rb_sStatus = rb_struct_define("Status", "code", "details", "metadata", NULL);
sym_code = ID2SYM(rb_intern("code"));
sym_details = ID2SYM(rb_intern("details"));
sym_metadata = ID2SYM(rb_intern("metadata"));
Init_grpc_byte_buffer();
Init_grpc_event();
Init_grpc_channel(); Init_grpc_channel();
Init_grpc_completion_queue(); Init_grpc_completion_queue();
Init_grpc_call(); Init_grpc_call();
Init_grpc_credentials(); Init_grpc_credentials();
Init_grpc_metadata();
Init_grpc_server(); Init_grpc_server();
Init_grpc_server_credentials(); Init_grpc_server_credentials();
Init_grpc_status_codes(); Init_grpc_status_codes();

@ -50,6 +50,15 @@ extern VALUE rb_sNewServerRpc;
/* rb_sStruct is the struct that holds status details. */ /* rb_sStruct is the struct that holds status details. */
extern VALUE rb_sStatus; extern VALUE rb_sStatus;
/* sym_code is the symbol for the code attribute of rb_sStatus. */
VALUE sym_code;
/* sym_details is the symbol for the details attribute of rb_sStatus. */
VALUE sym_details;
/* sym_metadata is the symbol for the metadata attribute of rb_sStatus. */
VALUE sym_metadata;
/* GC_NOT_MARKED is used in calls to Data_Wrap_Struct to indicate that the /* GC_NOT_MARKED is used in calls to Data_Wrap_Struct to indicate that the
wrapped struct does not need to participate in ruby gc. */ wrapped struct does not need to participate in ruby gc. */
extern const RUBY_DATA_FUNC GC_NOT_MARKED; extern const RUBY_DATA_FUNC GC_NOT_MARKED;
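The sym_code, sym_details and sym_metadata symbols declared here line up with the members of the Status struct registered in rb_grpc.c, which Ruby code sees as Struct::Status. A small illustrative sketch (the values are made up):

  status = Struct::Status.new(GRPC::Core::StatusCodes::OK, 'done', {})
  status.code      # the numeric status code
  status.details   # the details string
  status.metadata  # the trailing metadata hash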

@ -1,215 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "rb_metadata.h"
#include <ruby.h>
#include <string.h>
#include <grpc/grpc.h>
#include "rb_grpc.h"
/* grpc_rb_metadata wraps a grpc_metadata. It provides a peer ruby object,
* 'mark' to minimize copying when a metadata is created from ruby. */
typedef struct grpc_rb_metadata {
/* Holder of ruby objects involved in constructing the metadata */
VALUE mark;
/* The actual metadata */
grpc_metadata *wrapped;
} grpc_rb_metadata;
/* Destroys Metadata instances. */
static void grpc_rb_metadata_free(void *p) {
if (p == NULL) {
return;
};
/* Because metadata is only created during a call to grpc_call_add_metadata,
* and the call takes ownership of the metadata, this does not free the
* wrapped struct, only the wrapper */
xfree(p);
}
/* Protects the mark object from GC */
static void grpc_rb_metadata_mark(void *p) {
grpc_rb_metadata *md = NULL;
if (p == NULL) {
return;
}
md = (grpc_rb_metadata *)p;
/* If it's not already cleaned up, mark the mark object */
if (md->mark != Qnil && BUILTIN_TYPE(md->mark) != T_NONE) {
rb_gc_mark(md->mark);
}
}
/* Allocates Metadata instances.
Provides safe default values for the Metadata fields. */
static VALUE grpc_rb_metadata_alloc(VALUE cls) {
grpc_rb_metadata *wrapper = ALLOC(grpc_rb_metadata);
wrapper->wrapped = NULL;
wrapper->mark = Qnil;
return Data_Wrap_Struct(cls, grpc_rb_metadata_mark, grpc_rb_metadata_free,
wrapper);
}
/* id_key and id_value are the names of the hidden ivars that preserve the
 * original metadata key and value source strings */
static ID id_key;
static ID id_value;
/* Initializes Metadata instances. */
static VALUE grpc_rb_metadata_init(VALUE self, VALUE key, VALUE value) {
grpc_rb_metadata *wrapper = NULL;
grpc_metadata *md = ALLOC(grpc_metadata);
/* Use direct pointers to the strings wrapped by the ruby object to avoid
* copying */
Data_Get_Struct(self, grpc_rb_metadata, wrapper);
wrapper->wrapped = md;
if (TYPE(key) == T_SYMBOL) {
md->key = (char *)rb_id2name(SYM2ID(key));
} else { /* StringValueCStr does all other type exclusions for us */
md->key = StringValueCStr(key);
}
md->value = RSTRING_PTR(value);
md->value_length = RSTRING_LEN(value);
/* Save references to the original values on the mark object so that the
* pointers used there are valid for the lifetime of the object. */
wrapper->mark = rb_class_new_instance(0, NULL, rb_cObject);
rb_ivar_set(wrapper->mark, id_key, key);
rb_ivar_set(wrapper->mark, id_value, value);
return self;
}
/* Clones Metadata instances.
Gives Metadata a consistent implementation of Ruby's object copy/dup
protocol. */
static VALUE grpc_rb_metadata_init_copy(VALUE copy, VALUE orig) {
grpc_rb_metadata *orig_md = NULL;
grpc_rb_metadata *copy_md = NULL;
if (copy == orig) {
return copy;
}
/* Raise an error if orig is not a metadata object or a subclass. */
if (TYPE(orig) != T_DATA ||
RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_metadata_free) {
rb_raise(rb_eTypeError, "not a %s", rb_obj_classname(rb_cMetadata));
}
Data_Get_Struct(orig, grpc_rb_metadata, orig_md);
Data_Get_Struct(copy, grpc_rb_metadata, copy_md);
/* use ruby's MEMCPY to make a byte-for-byte copy of the metadata wrapper
* object. */
MEMCPY(copy_md, orig_md, grpc_rb_metadata, 1);
return copy;
}
/* Gets the key from a metadata instance. */
static VALUE grpc_rb_metadata_key(VALUE self) {
VALUE key = Qnil;
grpc_rb_metadata *wrapper = NULL;
grpc_metadata *md = NULL;
Data_Get_Struct(self, grpc_rb_metadata, wrapper);
if (wrapper->mark != Qnil) {
key = rb_ivar_get(wrapper->mark, id_key);
if (key != Qnil) {
return key;
}
}
md = wrapper->wrapped;
if (md == NULL || md->key == NULL) {
return Qnil;
}
return rb_str_new2(md->key);
}
/* Gets the value from a metadata instance. */
static VALUE grpc_rb_metadata_value(VALUE self) {
VALUE val = Qnil;
grpc_rb_metadata *wrapper = NULL;
grpc_metadata *md = NULL;
Data_Get_Struct(self, grpc_rb_metadata, wrapper);
if (wrapper->mark != Qnil) {
val = rb_ivar_get(wrapper->mark, id_value);
if (val != Qnil) {
return val;
}
}
md = wrapper->wrapped;
if (md == NULL || md->value == NULL) {
return Qnil;
}
return rb_str_new2(md->value);
}
/* rb_cMetadata is the Metadata class whose instances proxy grpc_metadata. */
VALUE rb_cMetadata = Qnil;
void Init_grpc_metadata() {
rb_cMetadata =
rb_define_class_under(rb_mGrpcCore, "Metadata", rb_cObject);
/* Allocates an object managed by the ruby runtime */
rb_define_alloc_func(rb_cMetadata, grpc_rb_metadata_alloc);
/* Provides a ruby constructor and support for dup/clone. */
rb_define_method(rb_cMetadata, "initialize", grpc_rb_metadata_init, 2);
rb_define_method(rb_cMetadata, "initialize_copy", grpc_rb_metadata_init_copy,
1);
/* Provides accessors for the key and value. */
rb_define_method(rb_cMetadata, "key", grpc_rb_metadata_key, 0);
rb_define_method(rb_cMetadata, "value", grpc_rb_metadata_value, 0);
id_key = rb_intern("__key");
id_value = rb_intern("__value");
}
/* Gets the wrapped metadata from the ruby wrapper */
grpc_metadata *grpc_rb_get_wrapped_metadata(VALUE v) {
grpc_rb_metadata *wrapper = NULL;
Data_Get_Struct(v, grpc_rb_metadata, wrapper);
return wrapper->wrapped;
}
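With the Metadata wrapper class removed, metadata travels as plain Ruby hashes attached to batch operations instead of wrapped grpc_metadata structs. A hedged sketch, assuming `call` and `q` already exist; the header name and value are invented for illustration:

  md = { 'x-example-header' => 'a-value' }   # hypothetical metadata
  tag = Object.new
  call.run_batch(q, tag, GRPC::Core::TimeConsts::INFINITE_FUTURE,
                 GRPC::Core::CallOps::SEND_INITIAL_METADATA => md)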

@ -1,53 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_RB_METADATA_H_
#define GRPC_RB_METADATA_H_
#include <grpc/grpc.h>
#include <ruby.h>
/* rb_cMetadata is the Metadata class whose instances proxy grpc_metadata. */
extern VALUE rb_cMetadata;
/* grpc_rb_metadata_create_with_mark creates a grpc_rb_metadata with a ruby mark
* object that will be kept alive while the metadata is alive. */
extern VALUE grpc_rb_metadata_create_with_mark(VALUE mark, grpc_metadata* md);
/* Gets the wrapped metadata from the ruby wrapper */
grpc_metadata* grpc_rb_get_wrapped_metadata(VALUE v);
/* Initializes the Metadata class. */
void Init_grpc_metadata();
#endif /* GRPC_RB_METADATA_H_ */

@ -46,6 +46,9 @@
/* rb_cServer is the ruby class that proxies grpc_server. */ /* rb_cServer is the ruby class that proxies grpc_server. */
VALUE rb_cServer = Qnil; VALUE rb_cServer = Qnil;
/* id_at is the constructor method of the ruby standard Time class. */
static ID id_at;
/* grpc_rb_server wraps a grpc_server. It provides a peer ruby object, /* grpc_rb_server wraps a grpc_server. It provides a peer ruby object,
'mark' to minimize copying when a server is created from ruby. */ 'mark' to minimize copying when a server is created from ruby. */
typedef struct grpc_rb_server { typedef struct grpc_rb_server {
@ -152,18 +155,89 @@ static VALUE grpc_rb_server_init_copy(VALUE copy, VALUE orig) {
return copy; return copy;
} }
static VALUE grpc_rb_server_request_call(VALUE self, VALUE tag_new) { /* request_call_stack holds various values used by the
grpc_call_error err; * grpc_rb_server_request_call function */
typedef struct request_call_stack {
grpc_call_details details;
grpc_metadata_array md_ary;
} request_call_stack;
/* grpc_request_call_stack_init ensures the request_call_stack is properly
* initialized */
static void grpc_request_call_stack_init(request_call_stack* st) {
MEMZERO(st, request_call_stack, 1);
grpc_metadata_array_init(&st->md_ary);
grpc_call_details_init(&st->details);
st->details.method = NULL;
st->details.host = NULL;
}
/* grpc_request_call_stack_cleanup ensures the request_call_stack is properly
* cleaned up */
static void grpc_request_call_stack_cleanup(request_call_stack* st) {
grpc_metadata_array_destroy(&st->md_ary);
grpc_call_details_destroy(&st->details);
}
/* call-seq:
cqueue = CompletionQueue.new
tag = Object.new
timeout = 10
server.request_call(cqueue, tag, timeout)
Requests notification of a new call on a server. */
static VALUE grpc_rb_server_request_call(VALUE self, VALUE cqueue,
VALUE tag_new, VALUE timeout) {
grpc_rb_server *s = NULL; grpc_rb_server *s = NULL;
grpc_call *call = NULL;
grpc_event *ev = NULL;
grpc_call_error err;
request_call_stack st;
VALUE result;
Data_Get_Struct(self, grpc_rb_server, s); Data_Get_Struct(self, grpc_rb_server, s);
if (s->wrapped == NULL) { if (s->wrapped == NULL) {
rb_raise(rb_eRuntimeError, "closed!"); rb_raise(rb_eRuntimeError, "closed!");
return Qnil;
} else { } else {
err = grpc_server_request_call_old(s->wrapped, ROBJECT(tag_new)); grpc_request_call_stack_init(&st);
/* call grpc_server_request_call, then wait for it to complete using
* pluck_event */
err = grpc_server_request_call(
s->wrapped, &call, &st.details, &st.md_ary,
grpc_rb_get_wrapped_completion_queue(cqueue),
ROBJECT(tag_new));
if (err != GRPC_CALL_OK) { if (err != GRPC_CALL_OK) {
rb_raise(rb_eCallError, "server request failed: %s (code=%d)", grpc_request_call_stack_cleanup(&st);
rb_raise(rb_eCallError, "grpc_server_request_call failed: %s (code=%d)",
grpc_call_error_detail_of(err), err); grpc_call_error_detail_of(err), err);
return Qnil;
}
ev = grpc_rb_completion_queue_pluck_event(cqueue, tag_new, timeout);
if (ev == NULL) {
grpc_request_call_stack_cleanup(&st);
return Qnil;
}
if (ev->data.op_complete != GRPC_OP_OK) {
grpc_request_call_stack_cleanup(&st);
grpc_event_finish(ev);
rb_raise(rb_eCallError, "request_call completion failed: (code=%d)",
ev->data.op_complete);
return Qnil;
} }
/* build the NewServerRpc struct result */
result = rb_struct_new(
rb_sNewServerRpc,
rb_str_new2(st.details.method),
rb_str_new2(st.details.host),
rb_funcall(rb_cTime, id_at, 2, INT2NUM(st.details.deadline.tv_sec),
INT2NUM(st.details.deadline.tv_nsec)),
grpc_rb_md_ary_to_h(&st.md_ary),
grpc_rb_wrap_call(call),
NULL);
grpc_event_finish(ev);
grpc_request_call_stack_cleanup(&st);
return result;
} }
return Qnil; return Qnil;
} }
@ -249,12 +323,13 @@ void Init_grpc_server() {
rb_define_method(rb_cServer, "initialize_copy", grpc_rb_server_init_copy, 1); rb_define_method(rb_cServer, "initialize_copy", grpc_rb_server_init_copy, 1);
/* Add the server methods. */ /* Add the server methods. */
rb_define_method(rb_cServer, "request_call", grpc_rb_server_request_call, 1); rb_define_method(rb_cServer, "request_call", grpc_rb_server_request_call, 3);
rb_define_method(rb_cServer, "start", grpc_rb_server_start, 0); rb_define_method(rb_cServer, "start", grpc_rb_server_start, 0);
rb_define_method(rb_cServer, "destroy", grpc_rb_server_destroy, 0); rb_define_method(rb_cServer, "destroy", grpc_rb_server_destroy, 0);
rb_define_alias(rb_cServer, "close", "destroy"); rb_define_alias(rb_cServer, "close", "destroy");
rb_define_method(rb_cServer, "add_http2_port", grpc_rb_server_add_http2_port, rb_define_method(rb_cServer, "add_http2_port", grpc_rb_server_add_http2_port,
-1); -1);
id_at = rb_intern("at");
} }
/* Gets the wrapped server from the ruby wrapper */ /* Gets the wrapped server from the ruby wrapper */
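On the Ruby side, the reworked request_call takes a completion queue, a tag and a timeout, and returns the NewServerRpc struct assembled above rather than enqueueing an event. A sketch of what a caller sees, assuming `server` is a started Core::Server and `cq` is its completion queue; the 10-second timeout mirrors the call-seq comment:

  tag = Object.new
  an_rpc = server.request_call(cq, tag, 10)
  an_rpc.method    # the full method string of the incoming RPC
  an_rpc.host      # the requested host
  an_rpc.deadline  # a Time built from the call details deadline
  an_rpc.metadata  # the request metadata as a Hash
  an_rpc.call      # the wrapped Core::Call to use for the rest of the RPC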

@ -31,7 +31,6 @@ require 'grpc/errors'
require 'grpc/grpc' require 'grpc/grpc'
require 'grpc/logconfig' require 'grpc/logconfig'
require 'grpc/version' require 'grpc/version'
require 'grpc/core/event'
require 'grpc/core/time_consts' require 'grpc/core/time_consts'
require 'grpc/generic/active_call' require 'grpc/generic/active_call'
require 'grpc/generic/client_stub' require 'grpc/generic/client_stub'

@ -1,44 +0,0 @@
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'grpc'
# GRPC contains the General RPC module.
module GRPC
module Core
# Event is a class defined in the c extension
#
# Here, we add an inspect method.
class Event
def inspect
"<#{self.class}: type:#{type}, tag:#{tag} result:#{result}>"
end
end
end
end

@ -31,10 +31,6 @@ require 'grpc'
# GRPC contains the General RPC module. # GRPC contains the General RPC module.
module GRPC module GRPC
# OutOfTime is an exception class that indicates that an RPC exceeded its
# deadline.
OutOfTime = Class.new(StandardError)
# BadStatus is an exception class that indicates that an error occurred at # BadStatus is an exception class that indicates that an error occurred at
# either end of a GRPC connection. When raised, it indicates that a status # either end of a GRPC connection. When raised, it indicates that a status
# error should be returned to the other end of a GRPC connection; when # error should be returned to the other end of a GRPC connection; when

@ -30,20 +30,14 @@
require 'forwardable' require 'forwardable'
require 'grpc/generic/bidi_call' require 'grpc/generic/bidi_call'
def assert_event_type(ev, want)
fail OutOfTime if ev.nil?
got = ev.type
fail "Unexpected rpc event: got #{got}, want #{want}" unless got == want
end
# GRPC contains the General RPC module. # GRPC contains the General RPC module.
module GRPC module GRPC
# The ActiveCall class provides simple methods for sending marshallable # The ActiveCall class provides simple methods for sending marshallable
# data to a call # data to a call
class ActiveCall class ActiveCall
include Core::CompletionType
include Core::StatusCodes include Core::StatusCodes
include Core::TimeConsts include Core::TimeConsts
include Core::CallOps
attr_reader(:deadline) attr_reader(:deadline)
# client_invoke begins a client invocation. # client_invoke begins a client invocation.
@ -61,15 +55,14 @@ module GRPC
# @param q [CompletionQueue] the completion queue # @param q [CompletionQueue] the completion queue
# @param deadline [Fixnum,TimeSpec] the deadline # @param deadline [Fixnum,TimeSpec] the deadline
def self.client_invoke(call, q, _deadline, **kw) def self.client_invoke(call, q, _deadline, **kw)
fail(ArgumentError, 'not a call') unless call.is_a? Core::Call fail(TypeError, '!Core::Call') unless call.is_a? Core::Call
unless q.is_a? Core::CompletionQueue unless q.is_a? Core::CompletionQueue
fail(ArgumentError, 'not a CompletionQueue') fail(TypeError, '!Core::CompletionQueue')
end end
call.add_metadata(kw) if kw.length > 0 metadata_tag = Object.new
client_metadata_read = Object.new call.run_batch(q, metadata_tag, INFINITE_FUTURE,
finished_tag = Object.new SEND_INITIAL_METADATA => kw)
call.invoke(q, client_metadata_read, finished_tag) metadata_tag
[finished_tag, client_metadata_read]
end end
# Creates an ActiveCall. # Creates an ActiveCall.
@ -91,25 +84,21 @@ module GRPC
# @param marshal [Function] f(obj)->string that marshal requests # @param marshal [Function] f(obj)->string that marshal requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses # @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [Fixnum] the deadline for the call to complete # @param deadline [Fixnum] the deadline for the call to complete
# @param finished_tag [Object] the object used as the call's finish tag, # @param metadata_tag [Object] the object used to obtain metadata for clients
# if the call has begun
# @param read_metadata_tag [Object] the object used as the call's finish
# tag, if the call has begun
# @param started [true|false] indicates if the call has begun # @param started [true|false] indicates if the call has begun
def initialize(call, q, marshal, unmarshal, deadline, finished_tag: nil, def initialize(call, q, marshal, unmarshal, deadline, started: true,
read_metadata_tag: nil, started: true) metadata_tag: nil)
fail(ArgumentError, 'not a call') unless call.is_a? Core::Call fail(TypeError, '!Core::Call') unless call.is_a? Core::Call
unless q.is_a? Core::CompletionQueue unless q.is_a? Core::CompletionQueue
fail(ArgumentError, 'not a CompletionQueue') fail(TypeError, '!Core::CompletionQueue')
end end
@call = call @call = call
@cq = q @cq = q
@deadline = deadline @deadline = deadline
@finished_tag = finished_tag
@read_metadata_tag = read_metadata_tag
@marshal = marshal @marshal = marshal
@started = started @started = started
@unmarshal = unmarshal @unmarshal = unmarshal
@metadata_tag = metadata_tag
end end
# Obtains the status of the call. # Obtains the status of the call.
@ -176,51 +165,38 @@ module GRPC
# writes_done indicates that all writes are completed. # writes_done indicates that all writes are completed.
# #
# It blocks until the remote endpoint acknowledges by sending a FINISHED # It blocks until the remote endpoint acknowledges with a status unless
# event, unless assert_finished is set to false. Any calls to # assert_finished is set to false. Any calls to #remote_send after this
# #remote_send after this call will fail. # call will fail.
# #
# @param assert_finished [true, false] when true(default), waits for # @param assert_finished [true, false] when true(default), waits for
# FINISHED. # FINISHED.
def writes_done(assert_finished = true) def writes_done(assert_finished = true)
@call.writes_done(self) ops = {
ev = @cq.pluck(self, INFINITE_FUTURE) SEND_CLOSE_FROM_CLIENT => nil
begin }
assert_event_type(ev, FINISH_ACCEPTED) ops[RECV_STATUS_ON_CLIENT] = nil if assert_finished
logger.debug("Writes done: waiting for finish? #{assert_finished}") @call.run_batch(@cq, self, INFINITE_FUTURE, ops)
ensure
ev.close
end
return unless assert_finished return unless assert_finished
ev = @cq.pluck(@finished_tag, INFINITE_FUTURE)
fail 'unexpected nil event' if ev.nil?
ev.close
@call.status @call.status
end end
# finished waits until the call is completed. # finished waits until a client call is completed.
# #
# It blocks until the remote endpoint acknowledges by sending a FINISHED # It blocks until the remote endpoint acknowledges by sending a status.
# event.
def finished def finished
ev = @cq.pluck(@finished_tag, INFINITE_FUTURE) batch_result = @call.run_batch(@cq, self, INFINITE_FUTURE,
begin RECV_STATUS_ON_CLIENT => nil)
fail "unexpected event: #{ev.inspect}" unless ev.type == FINISHED
if @call.metadata.nil? if @call.metadata.nil?
@call.metadata = ev.result.metadata @call.metadata = batch_result.metadata
else elsif !batch_result.metadata.nil?
@call.metadata.merge!(ev.result.metadata) @call.metadata.merge!(batch_result.metadata)
end
if ev.result.code != Core::StatusCodes::OK
fail BadStatus.new(ev.result.code, ev.result.details)
end end
res = ev.result if batch_result.status.code != Core::StatusCodes::OK
ensure fail BadStatus.new(batch_result.status.code,
ev.close batch_result.status.details)
end end
res batch_result
end end
# remote_send sends a request to the remote endpoint. # remote_send sends a request to the remote endpoint.
@ -232,73 +208,51 @@ module GRPC
# @param marshalled [false, true] indicates if the object is already # @param marshalled [false, true] indicates if the object is already
# marshalled. # marshalled.
def remote_send(req, marshalled = false) def remote_send(req, marshalled = false)
assert_queue_is_ready
logger.debug("sending #{req.inspect}, marshalled? #{marshalled}") logger.debug("sending #{req.inspect}, marshalled? #{marshalled}")
if marshalled if marshalled
payload = req payload = req
else else
payload = @marshal.call(req) payload = @marshal.call(req)
end end
@call.start_write(Core::ByteBuffer.new(payload), self) @call.run_batch(@cq, self, INFINITE_FUTURE, SEND_MESSAGE => payload)
# call queue#pluck, and wait for WRITE_ACCEPTED, so as not to return
# until the flow control allows another send on this call.
ev = @cq.pluck(self, INFINITE_FUTURE)
begin
assert_event_type(ev, WRITE_ACCEPTED)
ensure
ev.close
end
end end
# send_status sends a status to the remote endpoint # send_status sends a status to the remote endpoint.
# #
# @param code [int] the status code to send # @param code [int] the status code to send
# @param details [String] details # @param details [String] details
# @param assert_finished [true, false] when true(default), waits for # @param assert_finished [true, false] when true(default), waits for
# FINISHED. # FINISHED.
def send_status(code = OK, details = '', assert_finished = false) def send_status(code = OK, details = '', assert_finished = false)
assert_queue_is_ready ops = {
@call.start_write_status(code, details, self) SEND_STATUS_FROM_SERVER => Struct::Status.new(code, details)
ev = @cq.pluck(self, INFINITE_FUTURE) }
begin ops[RECV_CLOSE_ON_SERVER] = nil if assert_finished
assert_event_type(ev, FINISH_ACCEPTED) @call.run_batch(@cq, self, INFINITE_FUTURE, ops)
ensure
ev.close
end
logger.debug("Status sent: #{code}:'#{details}'")
return finished if assert_finished
nil nil
end end
# remote_read reads a response from the remote endpoint. # remote_read reads a response from the remote endpoint.
# #
# It blocks until the remote endpoint sends a READ or FINISHED event. On # It blocks until the remote endpoint replies with a message or status.
# a READ, it returns the response after unmarshalling it. On # On receiving a message, it returns the response after unmarshalling it.
# FINISHED, it returns nil if the status is OK, otherwise raising # On receiving a status, it returns nil if the status is OK, otherwise
# BadStatus # raising BadStatus
def remote_read def remote_read
if @call.metadata.nil? && !@read_metadata_tag.nil? ops = { RECV_MESSAGE => nil }
ev = @cq.pluck(@read_metadata_tag, INFINITE_FUTURE) ops[RECV_INITIAL_METADATA] = nil unless @metadata_tag.nil?
assert_event_type(ev, CLIENT_METADATA_READ) batch_result = @call.run_batch(@cq, self, INFINITE_FUTURE, ops)
@call.metadata = ev.result unless @metadata_tag.nil?
@read_metadata_tag = nil @call.metadata = batch_result.metadata
end @metadata_tag = nil
end
@call.start_read(self) logger.debug("received req: #{batch_result}")
ev = @cq.pluck(self, INFINITE_FUTURE) unless batch_result.nil? || batch_result.message.nil?
begin logger.debug("received req.to_s: #{batch_result.message}")
assert_event_type(ev, READ) res = @unmarshal.call(batch_result.message)
logger.debug("received req: #{ev.result.inspect}")
unless ev.result.nil?
logger.debug("received req.to_s: #{ev.result}")
res = @unmarshal.call(ev.result.to_s)
logger.debug("received_req (unmarshalled): #{res.inspect}") logger.debug("received_req (unmarshalled): #{res.inspect}")
return res return res
end end
ensure
ev.close
end
logger.debug('found nil; the final response has been sent') logger.debug('found nil; the final response has been sent')
nil nil
end end
@ -324,7 +278,6 @@ module GRPC
return enum_for(:each_remote_read) unless block_given? return enum_for(:each_remote_read) unless block_given?
loop do loop do
resp = remote_read resp = remote_read
break if resp.is_a? Struct::Status # is an OK status
break if resp.nil? # the last response was received break if resp.nil? # the last response was received
yield resp yield resp
end end
@ -461,8 +414,7 @@ module GRPC
# @return [Enumerator, nil] a response Enumerator # @return [Enumerator, nil] a response Enumerator
def bidi_streamer(requests, **kw, &blk) def bidi_streamer(requests, **kw, &blk)
start_call(**kw) unless @started start_call(**kw) unless @started
bd = BidiCall.new(@call, @cq, @marshal, @unmarshal, @deadline, bd = BidiCall.new(@call, @cq, @marshal, @unmarshal, @deadline)
@finished_tag)
bd.run_on_client(requests, &blk) bd.run_on_client(requests, &blk)
end end
@ -478,8 +430,7 @@ module GRPC
# #
# @param gen_each_reply [Proc] generates the BiDi stream replies # @param gen_each_reply [Proc] generates the BiDi stream replies
def run_server_bidi(gen_each_reply) def run_server_bidi(gen_each_reply)
bd = BidiCall.new(@call, @cq, @marshal, @unmarshal, @deadline, bd = BidiCall.new(@call, @cq, @marshal, @unmarshal, @deadline)
@finished_tag)
bd.run_on_server(gen_each_reply) bd.run_on_server(gen_each_reply)
end end
@ -516,21 +467,5 @@ module GRPC
# a Operation on the client. # a Operation on the client.
Operation = view_class(:cancel, :cancelled, :deadline, :execute, Operation = view_class(:cancel, :cancelled, :deadline, :execute,
:metadata, :status) :metadata, :status)
# confirms that no events are enqueued, and that the queue is not
# shutdown.
def assert_queue_is_ready
ev = nil
begin
ev = @cq.pluck(self, ZERO)
fail "unexpected event #{ev.inspect}" unless ev.nil?
rescue OutOfTime
logging.debug('timed out waiting for next event')
# expected, nothing should be on the queue and the deadline was ZERO,
# except things using another tag
ensure
ev.close unless ev.nil?
end
end
end end
end end
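Taken together, the ActiveCall changes mean a unary-style client exchange is now a sequence of run_batch operations behind these methods. A hedged end-to-end sketch; the host, method path and marshal/unmarshal procs are placeholders, not part of the library:

  require 'grpc'
  q = GRPC::Core::CompletionQueue.new
  host = 'localhost:10101'                       # placeholder address
  ch = GRPC::Core::Channel.new(host, nil)
  deadline = GRPC::Core::TimeConsts.from_relative_time(5)
  call = ch.create_call(q, '/some.Service/AMethod', host, deadline)
  marshal = proc { |obj| obj.to_s }              # placeholder marshaller
  unmarshal = proc { |s| s }                     # placeholder unmarshaller
  metadata_tag = GRPC::ActiveCall.client_invoke(call, q, deadline)
  active_call = GRPC::ActiveCall.new(call, q, marshal, unmarshal, deadline,
                                     metadata_tag: metadata_tag)
  active_call.remote_send('a request')   # SEND_MESSAGE
  active_call.writes_done(false)         # SEND_CLOSE_FROM_CLIENT
  resp = active_call.remote_read         # RECV_MESSAGE (+ RECV_INITIAL_METADATA)
  status = active_call.finished          # RECV_STATUS_ON_CLIENT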

@ -30,18 +30,12 @@
require 'forwardable' require 'forwardable'
require 'grpc/grpc' require 'grpc/grpc'
def assert_event_type(ev, want)
fail OutOfTime if ev.nil?
got = ev.type
fail("Unexpected rpc event: got #{got}, want #{want}") unless got == want
end
# GRPC contains the General RPC module. # GRPC contains the General RPC module.
module GRPC module GRPC
# The BiDiCall class orchestrates execution of a BiDi stream on a client or # The BiDiCall class orchestrates execution of a BiDi stream on a client or
# server. # server.
class BidiCall class BidiCall
include Core::CompletionType include Core::CallOps
include Core::StatusCodes include Core::StatusCodes
include Core::TimeConsts include Core::TimeConsts
@ -63,8 +57,7 @@ module GRPC
# @param marshal [Function] f(obj)->string that marshal requests # @param marshal [Function] f(obj)->string that marshal requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses # @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [Fixnum] the deadline for the call to complete # @param deadline [Fixnum] the deadline for the call to complete
# @param finished_tag [Object] the object used as the call's finish tag, def initialize(call, q, marshal, unmarshal, deadline)
def initialize(call, q, marshal, unmarshal, deadline, finished_tag)
fail(ArgumentError, 'not a call') unless call.is_a? Core::Call fail(ArgumentError, 'not a call') unless call.is_a? Core::Call
unless q.is_a? Core::CompletionQueue unless q.is_a? Core::CompletionQueue
fail(ArgumentError, 'not a CompletionQueue') fail(ArgumentError, 'not a CompletionQueue')
@ -72,7 +65,6 @@ module GRPC
@call = call @call = call
@cq = q @cq = q
@deadline = deadline @deadline = deadline
@finished_tag = finished_tag
@marshal = marshal @marshal = marshal
@readq = Queue.new @readq = Queue.new
@unmarshal = unmarshal @unmarshal = unmarshal
@ -146,30 +138,14 @@ module GRPC
requests.each do |req| requests.each do |req|
count += 1 count += 1
payload = @marshal.call(req) payload = @marshal.call(req)
@call.start_write(Core::ByteBuffer.new(payload), write_tag) @call.run_batch(@cq, write_tag, INFINITE_FUTURE,
ev = @cq.pluck(write_tag, INFINITE_FUTURE) SEND_MESSAGE => payload)
begin
assert_event_type(ev, WRITE_ACCEPTED)
ensure
ev.close
end
end end
if is_client if is_client
@call.writes_done(write_tag)
ev = @cq.pluck(write_tag, INFINITE_FUTURE)
begin
assert_event_type(ev, FINISH_ACCEPTED)
ensure
ev.close
end
logger.debug("bidi-client: sent #{count} reqs, waiting to finish") logger.debug("bidi-client: sent #{count} reqs, waiting to finish")
ev = @cq.pluck(@finished_tag, INFINITE_FUTURE) @call.run_batch(@cq, write_tag, INFINITE_FUTURE,
begin SEND_CLOSE_FROM_CLIENT => nil,
assert_event_type(ev, FINISHED) RECV_STATUS_ON_CLIENT => nil)
ensure
ev.close
end
logger.debug('bidi-client: finished received')
end end
rescue StandardError => e rescue StandardError => e
logger.warn('bidi: write_loop failed') logger.warn('bidi: write_loop failed')
@ -189,25 +165,20 @@ module GRPC
loop do loop do
logger.debug("waiting for read #{count}") logger.debug("waiting for read #{count}")
count += 1 count += 1
@call.start_read(read_tag) # TODO: ensure metadata is read if available, currently it's not
ev = @cq.pluck(read_tag, INFINITE_FUTURE) batch_result = @call.run_batch(@cq, read_tag, INFINITE_FUTURE,
begin RECV_MESSAGE => nil)
assert_event_type(ev, READ) # handle the next message
if batch_result.message.nil?
# handle the next event.
if ev.result.nil?
@readq.push(END_OF_READS) @readq.push(END_OF_READS)
logger.debug('done reading!') logger.debug('done reading!')
break break
end end
# push the latest read onto the queue and continue reading # push the latest read onto the queue and continue reading
logger.debug("received req: #{ev.result}") logger.debug("received req: #{batch_result.message}")
res = @unmarshal.call(ev.result.to_s) res = @unmarshal.call(batch_result.message)
@readq.push(res) @readq.push(res)
ensure
ev.close
end
end end
rescue StandardError => e rescue StandardError => e
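The loops above reduce each message exchange to a pair of run_batch calls. A minimal sketch of that shape, assuming `call`, `cq` and a marshalled `payload` already exist:

  ops = GRPC::Core::CallOps
  infinite = GRPC::Core::TimeConsts::INFINITE_FUTURE
  call.run_batch(cq, Object.new, infinite, ops::SEND_MESSAGE => payload)
  batch = call.run_batch(cq, Object.new, infinite, ops::RECV_MESSAGE => nil)
  batch.message   # nil once the other side has finished writing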

@ -35,9 +35,10 @@ module GRPC
# ClientStub represents an endpoint used to send requests to GRPC servers. # ClientStub represents an endpoint used to send requests to GRPC servers.
class ClientStub class ClientStub
include Core::StatusCodes include Core::StatusCodes
include Core::TimeConsts
# Default deadline is 5 seconds. # Default timeout is 5 seconds.
DEFAULT_DEADLINE = 5 DEFAULT_TIMEOUT = 5
# setup_channel is used by #initialize to construct a channel from its # setup_channel is used by #initialize to construct a channel from its
# arguments. # arguments.
@ -76,8 +77,8 @@ module GRPC
# present the host and arbitrary keyword args are ignored, and the RPC # present the host and arbitrary keyword args are ignored, and the RPC
# connection uses this channel. # connection uses this channel.
# #
# - :deadline # - :timeout
# when present, this is the default deadline used for calls # when present, this is the default timeout used for calls
# #
# - :update_metadata # - :update_metadata
# when present, this a func that takes a hash and returns a hash # when present, this a func that takes a hash and returns a hash
@ -87,13 +88,13 @@ module GRPC
# @param host [String] the host the stub connects to # @param host [String] the host the stub connects to
# @param q [Core::CompletionQueue] used to wait for events # @param q [Core::CompletionQueue] used to wait for events
# @param channel_override [Core::Channel] a pre-created channel # @param channel_override [Core::Channel] a pre-created channel
# @param deadline [Number] the default deadline to use in requests # @param timeout [Number] the default timeout to use in requests
# @param creds [Core::Credentials] the channel # @param creds [Core::Credentials] the channel
# @param update_metadata a func that updates metadata as described above # @param update_metadata a func that updates metadata as described above
# @param kw [KeywordArgs] the channel arguments # @param kw [KeywordArgs] the channel arguments
def initialize(host, q, def initialize(host, q,
channel_override: nil, channel_override: nil,
deadline: DEFAULT_DEADLINE, timeout: nil,
creds: nil, creds: nil,
update_metadata: nil, update_metadata: nil,
**kw) **kw)
@ -103,7 +104,7 @@ module GRPC
@update_metadata = ClientStub.check_update_metadata(update_metadata) @update_metadata = ClientStub.check_update_metadata(update_metadata)
alt_host = kw[Core::Channel::SSL_TARGET] alt_host = kw[Core::Channel::SSL_TARGET]
@host = alt_host.nil? ? host : alt_host @host = alt_host.nil? ? host : alt_host
@deadline = deadline @timeout = timeout.nil? ? DEFAULT_TIMEOUT : timeout
end end
# request_response sends a request to a GRPC server, and returns the # request_response sends a request to a GRPC server, and returns the
@ -140,12 +141,12 @@ module GRPC
# @param req [Object] the request sent to the server # @param req [Object] the request sent to the server
# @param marshal [Function] f(obj)->string that marshals requests # @param marshal [Function] f(obj)->string that marshals requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses # @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [Numeric] (optional) the max completion time in seconds # @param timeout [Numeric] (optional) the max completion time in seconds
# @param return_op [true|false] return an Operation if true # @param return_op [true|false] return an Operation if true
# @return [Object] the response received from the server # @return [Object] the response received from the server
def request_response(method, req, marshal, unmarshal, deadline = nil, def request_response(method, req, marshal, unmarshal, timeout = nil,
return_op: false, **kw) return_op: false, **kw)
c = new_active_call(method, marshal, unmarshal, deadline || @deadline) c = new_active_call(method, marshal, unmarshal, timeout)
md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone) md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone)
return c.request_response(req, **md) unless return_op return c.request_response(req, **md) unless return_op
@ -197,12 +198,12 @@ module GRPC
# @param requests [Object] an Enumerable of requests to send # @param requests [Object] an Enumerable of requests to send
# @param marshal [Function] f(obj)->string that marshals requests # @param marshal [Function] f(obj)->string that marshals requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses # @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [Numeric] the max completion time in seconds # @param timeout [Numeric] the max completion time in seconds
# @param return_op [true|false] return an Operation if true # @param return_op [true|false] return an Operation if true
# @return [Object|Operation] the response received from the server # @return [Object|Operation] the response received from the server
def client_streamer(method, requests, marshal, unmarshal, deadline = nil, def client_streamer(method, requests, marshal, unmarshal, timeout = nil,
return_op: false, **kw) return_op: false, **kw)
c = new_active_call(method, marshal, unmarshal, deadline || @deadline) c = new_active_call(method, marshal, unmarshal, timeout)
md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone) md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone)
return c.client_streamer(requests, **md) unless return_op return c.client_streamer(requests, **md) unless return_op
@ -262,13 +263,13 @@ module GRPC
# @param req [Object] the request sent to the server # @param req [Object] the request sent to the server
# @param marshal [Function] f(obj)->string that marshals requests # @param marshal [Function] f(obj)->string that marshals requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses # @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [Numeric] the max completion time in seconds # @param timeout [Numeric] the max completion time in seconds
# @param return_op [true|false] return an Operation if true # @param return_op [true|false] return an Operation if true
# @param blk [Block] when provided, is executed for each response # @param blk [Block] when provided, is executed for each response
# @return [Enumerator|Operation|nil] as discussed above # @return [Enumerator|Operation|nil] as discussed above
def server_streamer(method, req, marshal, unmarshal, deadline = nil, def server_streamer(method, req, marshal, unmarshal, timeout = nil,
return_op: false, **kw, &blk) return_op: false, **kw, &blk)
c = new_active_call(method, marshal, unmarshal, deadline || @deadline) c = new_active_call(method, marshal, unmarshal, timeout)
md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone) md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone)
return c.server_streamer(req, **md, &blk) unless return_op return c.server_streamer(req, **md, &blk) unless return_op
@ -367,13 +368,13 @@ module GRPC
# @param requests [Object] an Enumerable of requests to send # @param requests [Object] an Enumerable of requests to send
# @param marshal [Function] f(obj)->string that marshals requests # @param marshal [Function] f(obj)->string that marshals requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses # @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [Numeric] (optional) the max completion time in seconds # @param timeout [Numeric] (optional) the max completion time in seconds
# @param blk [Block] when provided, is executed for each response # @param blk [Block] when provided, is executed for each response
# @param return_op [true|false] return an Operation if true # @param return_op [true|false] return an Operation if true
# @return [Enumerator|nil|Operation] as discussed above # @return [Enumerator|nil|Operation] as discussed above
def bidi_streamer(method, requests, marshal, unmarshal, deadline = nil, def bidi_streamer(method, requests, marshal, unmarshal, timeout = nil,
return_op: false, **kw, &blk) return_op: false, **kw, &blk)
c = new_active_call(method, marshal, unmarshal, deadline || @deadline) c = new_active_call(method, marshal, unmarshal, timeout)
md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone) md = @update_metadata.nil? ? kw : @update_metadata.call(kw.clone)
return c.bidi_streamer(requests, **md, &blk) unless return_op return c.bidi_streamer(requests, **md, &blk) unless return_op
@ -390,15 +391,14 @@ module GRPC
# Creates a new active stub # Creates a new active stub
# #
# @param ch [GRPC::Channel] the channel used to create the stub. # @param method [string] the method being called.
# @param marshal [Function] f(obj)->string that marshals requests # @param marshal [Function] f(obj)->string that marshals requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses # @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [TimeConst] # @param timeout [TimeConst]
def new_active_call(ch, marshal, unmarshal, deadline = nil) def new_active_call(method, marshal, unmarshal, timeout = nil)
absolute_deadline = Core::TimeConsts.from_relative_time(deadline) deadline = from_relative_time(timeout.nil? ? @timeout : timeout)
call = @ch.create_call(ch, @host, absolute_deadline) call = @ch.create_call(@queue, method, @host, deadline)
ActiveCall.new(call, @queue, marshal, unmarshal, absolute_deadline, ActiveCall.new(call, @queue, marshal, unmarshal, deadline, started: false)
started: false)
end end
end end
end end
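With deadlines renamed to timeouts, the stub now converts a relative timeout into an absolute deadline per call. A hedged usage sketch; the address, method path and procs are placeholders:

  require 'grpc'
  q = GRPC::Core::CompletionQueue.new
  stub = GRPC::ClientStub.new('localhost:10101', q, timeout: 10)
  marshal = proc { |obj| obj.to_s }
  unmarshal = proc { |s| s }
  # the trailing 2 overrides the stub-wide 10 second timeout for this call
  resp = stub.request_response('/some.Service/AMethod', 'a request',
                               marshal, unmarshal, 2)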

@ -81,7 +81,6 @@ module GRPC
active_call.run_server_bidi(mth) active_call.run_server_bidi(mth)
end end
send_status(active_call, OK, 'OK') send_status(active_call, OK, 'OK')
active_call.finished
rescue BadStatus => e rescue BadStatus => e
# this is raised by handlers that want GRPC to send an application # this is raised by handlers that want GRPC to send an application
# error code and detail message. # error code and detail message.
@ -91,15 +90,11 @@ module GRPC
# This is raised by GRPC internals but should rarely, if ever happen. # This is raised by GRPC internals but should rarely, if ever happen.
# Log it, but don't notify the other endpoint.. # Log it, but don't notify the other endpoint..
logger.warn("failed call: #{active_call}\n#{e}") logger.warn("failed call: #{active_call}\n#{e}")
rescue OutOfTime rescue Core::OutOfTime
# This is raised when active_call#method.call exceeds the deadline # This is raised when active_call#method.call exceeds the deadline
# event. Send a status of deadline exceeded # event. Send a status of deadline exceeded
logger.warn("late call: #{active_call}") logger.warn("late call: #{active_call}")
send_status(active_call, DEADLINE_EXCEEDED, 'late') send_status(active_call, DEADLINE_EXCEEDED, 'late')
rescue Core::EventError => e
# This is raised by GRPC internals but should rarely, if ever happen.
# Log it, but don't notify the other endpoint..
logger.warn("failed call: #{active_call}\n#{e}")
rescue StandardError => e rescue StandardError => e
# This will usually be an unhandled error in the handling code. # This will usually be an unhandled error in the handling code.
# Send back a UNKNOWN status to the client # Send back a UNKNOWN status to the client
@ -142,7 +137,7 @@ module GRPC
def send_status(active_client, code, details) def send_status(active_client, code, details)
details = 'Not sure why' if details.nil? details = 'Not sure why' if details.nil?
active_client.send_status(code, details) active_client.send_status(code, details, code == OK)
rescue StandardError => e rescue StandardError => e
logger.warn("Could not send status #{code}:#{details}") logger.warn("Could not send status #{code}:#{details}")
logger.warn(e) logger.warn(e)

@ -38,7 +38,7 @@ module GRPC
# RpcServer hosts a number of services and makes them available on the # RpcServer hosts a number of services and makes them available on the
# network. # network.
class RpcServer class RpcServer
include Core::CompletionType include Core::CallOps
include Core::TimeConsts include Core::TimeConsts
extend ::Forwardable extend ::Forwardable
@ -202,20 +202,14 @@ module GRPC
end end
@pool.start @pool.start
@server.start @server.start
server_tag = Object.new request_call_tag = Object.new
until stopped? until stopped?
@server.request_call(server_tag) deadline = from_relative_time(@poll_period)
ev = @cq.pluck(server_tag, @poll_period) an_rpc = @server.request_call(@cq, request_call_tag, deadline)
next if ev.nil? next if an_rpc.nil?
if ev.type != SERVER_RPC_NEW c = new_active_server_call(an_rpc)
logger.warn("bad evt: got:#{ev.type}, want:#{SERVER_RPC_NEW}")
ev.close
next
end
c = new_active_server_call(ev.call, ev.result)
unless c.nil? unless c.nil?
mth = ev.result.method.to_sym mth = an_rpc.method.to_sym
ev.close
@pool.schedule(c) do |call| @pool.schedule(c) do |call|
rpc_descs[mth].run_server_method(call, rpc_handlers[mth]) rpc_descs[mth].run_server_method(call, rpc_handlers[mth])
end end
@ -224,46 +218,49 @@ module GRPC
@running = false @running = false
end end
def new_active_server_call(call, new_server_rpc) # Sends UNAVAILABLE if there are too many unprocessed jobs
# Accept the call. This is necessary even if a status is to be sent def available?(an_rpc)
# back immediately
finished_tag = Object.new
call_queue = Core::CompletionQueue.new
call.metadata = new_server_rpc.metadata # store the metadata
call.server_accept(call_queue, finished_tag)
call.server_end_initial_metadata
# Send UNAVAILABLE if there are too many unprocessed jobs
jobs_count, max = @pool.jobs_waiting, @max_waiting_requests jobs_count, max = @pool.jobs_waiting, @max_waiting_requests
logger.info("waiting: #{jobs_count}, max: #{max}") logger.info("waiting: #{jobs_count}, max: #{max}")
if @pool.jobs_waiting > @max_waiting_requests return an_rpc if @pool.jobs_waiting <= @max_waiting_requests
logger.warn("NOT AVAILABLE: too many jobs_waiting: #{new_server_rpc}") logger.warn("NOT AVAILABLE: too many jobs_waiting: #{an_rpc}")
noop = proc { |x| x } noop = proc { |x| x }
c = ActiveCall.new(call, call_queue, noop, noop, c = ActiveCall.new(an_rpc.call, @cq, noop, noop, an_rpc.deadline)
new_server_rpc.deadline,
finished_tag: finished_tag)
c.send_status(StatusCodes::UNAVAILABLE, '') c.send_status(StatusCodes::UNAVAILABLE, '')
return nil nil
end end
# Send NOT_FOUND if the method does not exist # Sends NOT_FOUND if the method can't be found
mth = new_server_rpc.method.to_sym def found?(an_rpc)
unless rpc_descs.key?(mth) mth = an_rpc.method.to_sym
logger.warn("NOT_FOUND: #{new_server_rpc}") return an_rpc if rpc_descs.key?(mth)
logger.warn("NOT_FOUND: #{an_rpc}")
noop = proc { |x| x } noop = proc { |x| x }
c = ActiveCall.new(call, call_queue, noop, noop, c = ActiveCall.new(an_rpc.call, @cq, noop, noop, an_rpc.deadline)
new_server_rpc.deadline,
finished_tag: finished_tag)
c.send_status(StatusCodes::NOT_FOUND, '') c.send_status(StatusCodes::NOT_FOUND, '')
return nil nil
end end
def new_active_server_call(an_rpc)
# Accept the call. This is necessary even if a status is to be sent
# back immediately
return nil if an_rpc.nil? || an_rpc.call.nil?
# allow the metadata to be accessed from the call
handle_call_tag = Object.new
an_rpc.call.metadata = an_rpc.metadata
# TODO: add a hook to send md
an_rpc.call.run_batch(@cq, handle_call_tag, INFINITE_FUTURE,
SEND_INITIAL_METADATA => nil)
return nil unless available?(an_rpc)
return nil unless found?(an_rpc)
# Create the ActiveCall # Create the ActiveCall
rpc_desc = rpc_descs[mth] logger.info("deadline is #{an_rpc.deadline}; (now=#{Time.now})")
logger.info("deadline is #{new_server_rpc.deadline}; (now=#{Time.now})") rpc_desc = rpc_descs[an_rpc.method.to_sym]
ActiveCall.new(call, call_queue, ActiveCall.new(an_rpc.call, @cq,
rpc_desc.marshal_proc, rpc_desc.unmarshal_proc(:input), rpc_desc.marshal_proc, rpc_desc.unmarshal_proc(:input),
new_server_rpc.deadline, finished_tag: finished_tag) an_rpc.deadline)
end end
# Pool is a simple thread pool for running server requests. # Pool is a simple thread pool for running server requests.
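Condensing the new dispatch path: request_call yields the NewServerRpc struct, the server sends its initial metadata with a run_batch, and the ActiveCall is built directly from the struct's fields. A sketch of the flow, reusing RpcServer's own @server, @cq and rpc_descs state:

  an_rpc = @server.request_call(@cq, Object.new, from_relative_time(@poll_period))
  unless an_rpc.nil? || an_rpc.call.nil?
    an_rpc.call.metadata = an_rpc.metadata
    an_rpc.call.run_batch(@cq, Object.new, INFINITE_FUTURE,
                          SEND_INITIAL_METADATA => nil)
    desc = rpc_descs[an_rpc.method.to_sym]
    active_call = ActiveCall.new(an_rpc.call, @cq, desc.marshal_proc,
                                 desc.unmarshal_proc(:input), an_rpc.deadline)
  end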

@ -1,44 +0,0 @@
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'grpc'
describe 'Wrapped classes where .new cannot create an instance' do
describe GRPC::Core::Event do
it 'should fail .new with a TypeError' do
expect { GRPC::Core::Event.new }.to raise_error(TypeError)
end
end
describe GRPC::Core::Call do
it 'should fail .new with a TypeError' do
expect { GRPC::Core::Call.new }.to raise_error(TypeError)
end
end
end

@ -1,67 +0,0 @@
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'grpc'
describe GRPC::Core::ByteBuffer do
describe '#new' do
it 'is constructed from a string' do
expect { GRPC::Core::ByteBuffer.new('#new') }.not_to raise_error
end
it 'can be constructed from the empty string' do
expect { GRPC::Core::ByteBuffer.new('') }.not_to raise_error
end
it 'cannot be constructed from nil' do
expect { GRPC::Core::ByteBuffer.new(nil) }.to raise_error TypeError
end
it 'cannot be constructed from non-strings' do
[1, Object.new, :a_symbol].each do |x|
expect { GRPC::Core::ByteBuffer.new(x) }.to raise_error TypeError
end
end
end
describe '#to_s' do
it 'is the string value the ByteBuffer was constructed with' do
expect(GRPC::Core::ByteBuffer.new('#to_s').to_s).to eq('#to_s')
end
end
describe '#dup' do
it 'makes an instance whose #to_s is the original string value' do
bb = GRPC::Core::ByteBuffer.new('#dup')
a_copy = bb.dup
expect(a_copy.to_s).to eq('#dup')
expect(a_copy.dup.to_s).to eq('#dup')
end
end
end

@ -66,51 +66,34 @@ describe GRPC::Core::RpcErrors do
  end
end

describe GRPC::Core::CallOps do
  before(:each) do
    @known_types = {
      SEND_INITIAL_METADATA: 0,
      SEND_MESSAGE: 1,
      SEND_CLOSE_FROM_CLIENT: 2,
      SEND_STATUS_FROM_SERVER: 3,
      RECV_INITIAL_METADATA: 4,
      RECV_MESSAGE: 5,
      RECV_STATUS_ON_CLIENT: 6,
      RECV_CLOSE_ON_SERVER: 7
    }
  end

  it 'should have symbols for all the known operation types' do
    m = GRPC::Core::CallOps
    syms_and_codes = m.constants.collect { |c| [c, m.const_get(c)] }
    expect(Hash[syms_and_codes]).to eq(@known_types)
  end
end

describe GRPC::Core::Call do
  let(:client_queue) { GRPC::Core::CompletionQueue.new }
  let(:test_tag) { Object.new }
  let(:fake_host) { 'localhost:10101' }

  before(:each) do
    @ch = GRPC::Core::Channel.new(fake_host, nil)
  end

  describe '#status' do
@ -154,7 +137,7 @@ describe GRPC::Core::Call do
  end

  def make_test_call
    @ch.create_call(client_queue, 'dummy_method', 'dummy_host', deadline)
  end

  def deadline

@ -36,16 +36,13 @@ def load_test_certs
end

describe GRPC::Core::Channel do
  let(:fake_host) { 'localhost:0' }
  let(:cq) { GRPC::Core::CompletionQueue.new }

  def create_test_cert
    GRPC::Core::Credentials.new(load_test_certs[0])
  end

  shared_examples '#new' do
    it 'take a host name without channel args' do
      expect { GRPC::Core::Channel.new('dummy_host', nil) }.not_to raise_error
@ -115,25 +112,23 @@ describe GRPC::Core::Channel do
  describe '#create_call' do
    it 'creates a call OK' do
      ch = GRPC::Core::Channel.new(fake_host, nil)

      deadline = Time.now + 5
      blk = proc do
        ch.create_call(cq, 'dummy_method', 'dummy_host', deadline)
      end
      expect(&blk).to_not raise_error
    end

    it 'raises an error if called on a closed channel' do
      ch = GRPC::Core::Channel.new(fake_host, nil)
      ch.close

      deadline = Time.now + 5
      blk = proc do
        ch.create_call(cq, 'dummy_method', 'dummy_host', deadline)
      end
      expect(&blk).to raise_error(RuntimeError)
    end
@ -141,15 +136,13 @@ describe GRPC::Core::Channel do
  describe '#destroy' do
    it 'destroys a channel ok' do
      ch = GRPC::Core::Channel.new(fake_host, nil)
      blk = proc { ch.destroy }
      expect(&blk).to_not raise_error
    end

    it 'can be called more than once without error' do
      ch = GRPC::Core::Channel.new(fake_host, nil)
      blk = proc { ch.destroy }
      blk.call
      expect(&blk).to_not raise_error
@ -164,15 +157,13 @@ describe GRPC::Core::Channel do
  describe '#close' do
    it 'closes a channel ok' do
      ch = GRPC::Core::Channel.new(fake_host, nil)
      blk = proc { ch.close }
      expect(&blk).to_not raise_error
    end

    it 'can be called more than once without error' do
      ch = GRPC::Core::Channel.new(fake_host, nil)
      blk = proc { ch.close }
      blk.call
      expect(&blk).to_not raise_error
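As these examples show, Channel#create_call now takes the completion queue explicitly. A minimal sketch of the call site (the host and method names below are placeholders):

require 'grpc'

cq = GRPC::Core::CompletionQueue.new
ch = GRPC::Core::Channel.new('localhost:10101', nil)
deadline = Time.now + 5

# the completion queue is now the first argument to create_call
call = ch.create_call(cq, 'dummy_method', 'dummy_host', deadline)
ch.close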

@ -30,7 +30,6 @@
require 'grpc'
require 'spec_helper'

include GRPC::Core

def load_test_certs
@ -40,6 +39,8 @@ def load_test_certs
end

shared_context 'setup: tags' do
  let(:sent_message) { 'sent message' }
  let(:reply_text) { 'the reply' }
  before(:example) do
    @server_finished_tag = Object.new
    @client_finished_tag = Object.new
@ -52,153 +53,136 @@ shared_context 'setup: tags' do
    Time.now + 2
  end

  def server_allows_client_to_proceed
    recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
    expect(recvd_rpc).to_not eq nil
    server_call = recvd_rpc.call
    ops = { CallOps::SEND_INITIAL_METADATA => {} }
    svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline, ops)
    expect(svr_batch.send_metadata).to be true
    server_call
  end

  def new_client_call
    @ch.create_call(@client_queue, '/method', 'foo.test.google.fr', deadline)
  end
end

shared_examples 'basic GRPC message delivery is OK' do
  include_context 'setup: tags'

  it 'servers receive requests from clients and can respond' do
    call = new_client_call
    client_ops = {
      CallOps::SEND_INITIAL_METADATA => {},
      CallOps::SEND_MESSAGE => sent_message
    }
    batch_result = call.run_batch(@client_queue, @client_tag, deadline,
                                  client_ops)
    expect(batch_result.send_metadata).to be true
    expect(batch_result.send_message).to be true

    # confirm the server can read the inbound message
    server_call = server_allows_client_to_proceed
    server_ops = {
      CallOps::RECV_MESSAGE => nil
    }
    svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline,
                                      server_ops)
    expect(svr_batch.message).to eq(sent_message)
  end

  it 'responses written by servers are received by the client' do
    call = new_client_call
    client_ops = {
      CallOps::SEND_INITIAL_METADATA => {},
      CallOps::SEND_MESSAGE => sent_message
    }
    batch_result = call.run_batch(@client_queue, @client_tag, deadline,
                                  client_ops)
    expect(batch_result.send_metadata).to be true
    expect(batch_result.send_message).to be true

    # confirm the server can read the inbound message
    server_call = server_allows_client_to_proceed
    server_ops = {
      CallOps::RECV_MESSAGE => nil,
      CallOps::SEND_MESSAGE => reply_text
    }
    svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline,
                                      server_ops)
    expect(svr_batch.message).to eq(sent_message)
    expect(svr_batch.send_message).to be true
  end
  it 'servers can ignore a client write and send a status' do
    call = new_client_call
    client_ops = {
      CallOps::SEND_INITIAL_METADATA => {},
      CallOps::SEND_MESSAGE => sent_message
    }
    batch_result = call.run_batch(@client_queue, @client_tag, deadline,
                                  client_ops)
    expect(batch_result.send_metadata).to be true
    expect(batch_result.send_message).to be true

    # confirm the server can read the inbound message
    the_status = Struct::Status.new(StatusCodes::OK, 'OK')
    server_call = server_allows_client_to_proceed
    server_ops = {
      CallOps::SEND_STATUS_FROM_SERVER => the_status
    }
    svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline,
                                      server_ops)
    expect(svr_batch.message).to eq nil
    expect(svr_batch.send_status).to be true
  end

  it 'completes calls by sending status to client and server' do
    call = new_client_call
    client_ops = {
      CallOps::SEND_INITIAL_METADATA => {},
      CallOps::SEND_MESSAGE => sent_message
    }
    batch_result = call.run_batch(@client_queue, @client_tag, deadline,
                                  client_ops)
    expect(batch_result.send_metadata).to be true
    expect(batch_result.send_message).to be true

    # confirm the server can read the inbound message and respond
    the_status = Struct::Status.new(StatusCodes::OK, 'OK', {})
    server_call = server_allows_client_to_proceed
    server_ops = {
      CallOps::RECV_MESSAGE => nil,
      CallOps::SEND_MESSAGE => reply_text,
      CallOps::SEND_STATUS_FROM_SERVER => the_status
    }
    svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline,
                                      server_ops)
    expect(svr_batch.message).to eq sent_message
    expect(svr_batch.send_status).to be true
    expect(svr_batch.send_message).to be true

    # confirm the client can receive the server response and status.
    client_ops = {
      CallOps::SEND_CLOSE_FROM_CLIENT => nil,
      CallOps::RECV_MESSAGE => nil,
      CallOps::RECV_STATUS_ON_CLIENT => nil
    }
    batch_result = call.run_batch(@client_queue, @client_tag, deadline,
                                  client_ops)
    expect(batch_result.send_close).to be true
    expect(batch_result.message).to eq reply_text
    expect(batch_result.status).to eq the_status

    # confirm the server can receive the client close.
    server_ops = {
      CallOps::RECV_CLOSE_ON_SERVER => nil
    }
    svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline,
                                      server_ops)
    expect(svr_batch.send_close).to be true
  end
end
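Assembling the client-side operations used in these examples, a hedged sketch of a full round trip looks roughly as follows; it assumes a gRPC server is listening on the (placeholder) address and accepts the call.

require 'grpc'

cq = GRPC::Core::CompletionQueue.new
ch = GRPC::Core::Channel.new('localhost:10101', nil)
deadline = Time.now + 2
call = ch.create_call(cq, '/method', 'foo.test.google.fr', deadline)

send_result = call.run_batch(cq, Object.new, deadline,
                             GRPC::Core::CallOps::SEND_INITIAL_METADATA => {},
                             GRPC::Core::CallOps::SEND_MESSAGE => 'hello')
# send_result.send_metadata and send_result.send_message report what was sent

recv_result = call.run_batch(cq, Object.new, deadline,
                             GRPC::Core::CallOps::SEND_CLOSE_FROM_CLIENT => nil,
                             GRPC::Core::CallOps::RECV_MESSAGE => nil,
                             GRPC::Core::CallOps::RECV_STATUS_ON_CLIENT => nil)
# recv_result.message holds the reply; recv_result.status holds the status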
@ -224,25 +208,33 @@ shared_examples 'GRPC metadata delivery works OK' do
  it 'raises an exception if a metadata key is invalid' do
    @bad_keys.each do |md|
      call = new_client_call
      client_ops = {
        CallOps::SEND_INITIAL_METADATA => md
      }
      blk = proc do
        call.run_batch(@client_queue, @client_tag, deadline,
                       client_ops)
      end
      expect(&blk).to raise_error
    end
  end

  it 'sends all the metadata pairs when keys and values are valid' do
    @valid_metadata.each do |md|
      call = new_client_call
      client_ops = {
        CallOps::SEND_INITIAL_METADATA => md
      }
      batch_result = call.run_batch(@client_queue, @client_tag, deadline,
                                    client_ops)
      expect(batch_result.send_metadata).to be true

      # confirm the server can receive the client metadata
      recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
      expect(recvd_rpc).to_not eq nil
      recvd_md = recvd_rpc.metadata
      replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
      expect(recvd_md).to eq(recvd_md.merge(replace_symbols))
    end
  end
end
@ -266,55 +258,81 @@ shared_examples 'GRPC metadata delivery works OK' do
  it 'raises an exception if a metadata key is invalid' do
    @bad_keys.each do |md|
      call = new_client_call
      # client signals that it's done sending metadata to allow server to
      # respond
      client_ops = {
        CallOps::SEND_INITIAL_METADATA => nil
      }
      call.run_batch(@client_queue, @client_tag, deadline, client_ops)

      # server gets the invocation
      recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
      expect(recvd_rpc).to_not eq nil
      server_ops = {
        CallOps::SEND_INITIAL_METADATA => md
      }
      blk = proc do
        recvd_rpc.call.run_batch(@server_queue, @server_tag, deadline,
                                 server_ops)
      end
      expect(&blk).to raise_error
    end
  end

  it 'sends an empty hash if no metadata is added' do
    call = new_client_call
    # client signals that it's done sending metadata to allow server to
    # respond
    client_ops = {
      CallOps::SEND_INITIAL_METADATA => nil
    }
    call.run_batch(@client_queue, @client_tag, deadline, client_ops)

    # server gets the invocation but sends no metadata back
    recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
    expect(recvd_rpc).to_not eq nil
    server_call = recvd_rpc.call
    server_ops = {
      CallOps::SEND_INITIAL_METADATA => nil
    }
    server_call.run_batch(@server_queue, @server_tag, deadline, server_ops)

    # client receives nothing as expected
    client_ops = {
      CallOps::RECV_INITIAL_METADATA => nil
    }
    batch_result = call.run_batch(@client_queue, @client_tag, deadline,
                                  client_ops)
    expect(batch_result.metadata).to eq({})
  end

  it 'sends all the pairs when keys and values are valid' do
    @valid_metadata.each do |md|
      call = new_client_call
      # client signals that it's done sending metadata to allow server to
      # respond
      client_ops = {
        CallOps::SEND_INITIAL_METADATA => nil
      }
      call.run_batch(@client_queue, @client_tag, deadline, client_ops)

      # server gets the invocation and sends its metadata to the client
      recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
      expect(recvd_rpc).to_not eq nil
      server_call = recvd_rpc.call
      server_ops = {
        CallOps::SEND_INITIAL_METADATA => md
      }
      server_call.run_batch(@server_queue, @server_tag, deadline, server_ops)

      # client receives the metadata
      client_ops = {
        CallOps::RECV_INITIAL_METADATA => nil
      }
      batch_result = call.run_batch(@client_queue, @client_tag, deadline,
                                    client_ops)
      replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
      expect(batch_result.metadata).to eq(replace_symbols)
    end
  end
end
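Read together, the two shared-example groups show metadata travelling in both directions. A hedged, illustrative sketch of the client-to-server half (the server setup mirrors the specs and the key/value pairs are placeholders):

require 'grpc'

server_queue = GRPC::Core::CompletionQueue.new
server = GRPC::Core::Server.new(server_queue, nil)
port = server.add_http2_port('0.0.0.0:0')
server.start

client_queue = GRPC::Core::CompletionQueue.new
ch = GRPC::Core::Channel.new("0.0.0.0:#{port}", nil)
deadline = Time.now + 2
call = ch.create_call(client_queue, '/method', 'foo.test.google.fr', deadline)
call.run_batch(client_queue, Object.new, deadline,
               GRPC::Core::CallOps::SEND_INITIAL_METADATA => { k1: 'v1' })

# the metadata shows up on the object returned by Server#request_call
an_rpc = server.request_call(server_queue, Object.new, deadline)
an_rpc.metadata  # => { 'k1' => 'v1' } (keys arrive as strings)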

@ -1,53 +0,0 @@
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'grpc'
describe GRPC::Core::CompletionType do
before(:each) do
@known_types = {
QUEUE_SHUTDOWN: 0,
OP_COMPLETE: 1,
READ: 2,
WRITE_ACCEPTED: 3,
FINISH_ACCEPTED: 4,
CLIENT_METADATA_READ: 5,
FINISHED: 6,
SERVER_RPC_NEW: 7,
SERVER_SHUTDOWN: 8,
RESERVED: 9
}
end
it 'should have all the known types' do
mod = GRPC::Core::CompletionType
blk = proc { Hash[mod.constants.collect { |c| [c, mod.const_get(c)] }] }
expect(blk.call).to eq(@known_types)
end
end

@ -34,12 +34,11 @@ include GRPC::Core::StatusCodes
describe GRPC::ActiveCall do
  ActiveCall = GRPC::ActiveCall
  Call = GRPC::Core::Call
  CallOps = GRPC::Core::CallOps

  before(:each) do
    @pass_through = proc { |x| x }
    @server_tag = Object.new
    @tag = Object.new

    @client_queue = GRPC::Core::CompletionQueue.new
@ -48,7 +47,7 @@ describe GRPC::ActiveCall do
    @server = GRPC::Core::Server.new(@server_queue, nil)
    server_port = @server.add_http2_port(host)
    @server.start
    @ch = GRPC::Core::Channel.new("0.0.0.0:#{server_port}", nil)
  end

  after(:each) do
@ -58,12 +57,10 @@ describe GRPC::ActiveCall do
  describe 'restricted view methods' do
    before(:each) do
      call = make_test_call
      md_tag = ActiveCall.client_invoke(call, @client_queue, deadline)
      @client_call = ActiveCall.new(call, @client_queue, @pass_through,
                                    @pass_through, deadline,
                                    metadata_tag: md_tag)
    end

    describe '#multi_req_view' do
@ -90,48 +87,45 @@ describe GRPC::ActiveCall do
describe '#remote_send' do describe '#remote_send' do
it 'allows a client to send a payload to the server' do it 'allows a client to send a payload to the server' do
call = make_test_call call = make_test_call
done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, md_tag = ActiveCall.client_invoke(call, @client_queue, deadline)
deadline)
@client_call = ActiveCall.new(call, @client_queue, @pass_through, @client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline, @pass_through, deadline,
finished_tag: done_tag, metadata_tag: md_tag)
read_metadata_tag: meta_tag)
msg = 'message is a string' msg = 'message is a string'
@client_call.remote_send(msg) @client_call.remote_send(msg)
# check that server rpc new was received # check that server rpc new was received
@server.request_call(@server_tag) recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
ev = @server_queue.next(deadline) expect(recvd_rpc).to_not eq nil
expect(ev.type).to be(CompletionType::SERVER_RPC_NEW) recvd_call = recvd_rpc.call
expect(ev.call).to be_a(Call)
expect(ev.tag).to be(@server_tag)
# Accept the call, and verify that the server reads the response ok. # Accept the call, and verify that the server reads the response ok.
ev.call.server_accept(@client_queue, @server_tag) server_ops = {
ev.call.server_end_initial_metadata CallOps::SEND_INITIAL_METADATA => {}
server_call = ActiveCall.new(ev.call, @client_queue, @pass_through, }
recvd_call.run_batch(@server_queue, @server_tag, deadline, server_ops)
server_call = ActiveCall.new(recvd_call, @server_queue, @pass_through,
@pass_through, deadline) @pass_through, deadline)
expect(server_call.remote_read).to eq(msg) expect(server_call.remote_read).to eq(msg)
end end
it 'marshals the payload using the marshal func' do it 'marshals the payload using the marshal func' do
call = make_test_call call = make_test_call
done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, ActiveCall.client_invoke(call, @client_queue, deadline)
deadline)
marshal = proc { |x| 'marshalled:' + x } marshal = proc { |x| 'marshalled:' + x }
client_call = ActiveCall.new(call, @client_queue, marshal, client_call = ActiveCall.new(call, @client_queue, marshal,
@pass_through, deadline, @pass_through, deadline)
finished_tag: done_tag,
read_metadata_tag: meta_tag)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
# confirm that the message was marshalled # confirm that the message was marshalled
@server.request_call(@server_tag) recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
ev = @server_queue.next(deadline) recvd_call = recvd_rpc.call
ev.call.server_accept(@client_queue, @server_tag) server_ops = {
ev.call.server_end_initial_metadata CallOps::SEND_INITIAL_METADATA => nil
server_call = ActiveCall.new(ev.call, @client_queue, @pass_through, }
recvd_call.run_batch(@server_queue, @server_tag, deadline, server_ops)
server_call = ActiveCall.new(recvd_call, @server_queue, @pass_through,
@pass_through, deadline) @pass_through, deadline)
expect(server_call.remote_read).to eq('marshalled:' + msg) expect(server_call.remote_read).to eq('marshalled:' + msg)
end end
@ -142,23 +136,22 @@ describe GRPC::ActiveCall do
call = make_test_call call = make_test_call
ActiveCall.client_invoke(call, @client_queue, deadline, ActiveCall.client_invoke(call, @client_queue, deadline,
k1: 'v1', k2: 'v2') k1: 'v1', k2: 'v2')
@server.request_call(@server_tag) recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
ev = @server_queue.next(deadline) recvd_call = recvd_rpc.call
expect(ev).to_not be_nil expect(recvd_call).to_not be_nil
expect(ev.result.metadata['k1']).to eq('v1') expect(recvd_rpc.metadata).to_not be_nil
expect(ev.result.metadata['k2']).to eq('v2') expect(recvd_rpc.metadata['k1']).to eq('v1')
expect(recvd_rpc.metadata['k2']).to eq('v2')
end end
end end
describe '#remote_read' do describe '#remote_read' do
it 'reads the response sent by a server' do it 'reads the response sent by a server' do
call = make_test_call call = make_test_call
done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, md_tag = ActiveCall.client_invoke(call, @client_queue, deadline)
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through, client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline, @pass_through, deadline,
finished_tag: done_tag, metadata_tag: md_tag)
read_metadata_tag: meta_tag)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
server_call = expect_server_to_receive(msg) server_call = expect_server_to_receive(msg)
@ -168,12 +161,10 @@ describe GRPC::ActiveCall do
it 'saves no metadata when the server adds no metadata' do it 'saves no metadata when the server adds no metadata' do
call = make_test_call call = make_test_call
done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, md_tag = ActiveCall.client_invoke(call, @client_queue, deadline)
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through, client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline, @pass_through, deadline,
finished_tag: done_tag, metadata_tag: md_tag)
read_metadata_tag: meta_tag)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
server_call = expect_server_to_receive(msg) server_call = expect_server_to_receive(msg)
@ -185,12 +176,10 @@ describe GRPC::ActiveCall do
it 'saves metadata add by the server' do it 'saves metadata add by the server' do
call = make_test_call call = make_test_call
done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, md_tag = ActiveCall.client_invoke(call, @client_queue, deadline)
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through, client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline, @pass_through, deadline,
finished_tag: done_tag, metadata_tag: md_tag)
read_metadata_tag: meta_tag)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
server_call = expect_server_to_receive(msg, k1: 'v1', k2: 'v2') server_call = expect_server_to_receive(msg, k1: 'v1', k2: 'v2')
@ -203,12 +192,10 @@ describe GRPC::ActiveCall do
it 'get a nil msg before a status when an OK status is sent' do it 'get a nil msg before a status when an OK status is sent' do
call = make_test_call call = make_test_call
done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, md_tag = ActiveCall.client_invoke(call, @client_queue, deadline)
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through, client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline, @pass_through, deadline,
finished_tag: done_tag, metadata_tag: md_tag)
read_metadata_tag: meta_tag)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
client_call.writes_done(false) client_call.writes_done(false)
@ -222,13 +209,11 @@ describe GRPC::ActiveCall do
it 'unmarshals the response using the unmarshal func' do it 'unmarshals the response using the unmarshal func' do
call = make_test_call call = make_test_call
done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, md_tag = ActiveCall.client_invoke(call, @client_queue, deadline)
deadline)
unmarshal = proc { |x| 'unmarshalled:' + x } unmarshal = proc { |x| 'unmarshalled:' + x }
client_call = ActiveCall.new(call, @client_queue, @pass_through, client_call = ActiveCall.new(call, @client_queue, @pass_through,
unmarshal, deadline, unmarshal, deadline,
finished_tag: done_tag, metadata_tag: md_tag)
read_metadata_tag: meta_tag)
# confirm the client receives the unmarshalled message # confirm the client receives the unmarshalled message
msg = 'message is a string' msg = 'message is a string'
@ -249,13 +234,11 @@ describe GRPC::ActiveCall do
it 'the returns an enumerator that can read n responses' do it 'the returns an enumerator that can read n responses' do
call = make_test_call call = make_test_call
done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, md_tag = ActiveCall.client_invoke(call, @client_queue, deadline)
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through, client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline, @pass_through, deadline,
finished_tag: done_tag, metadata_tag: md_tag)
read_metadata_tag: meta_tag) msg = 'message is a string'
msg = 'message is 4a string'
reply = 'server_response' reply = 'server_response'
client_call.remote_send(msg) client_call.remote_send(msg)
server_call = expect_server_to_receive(msg) server_call = expect_server_to_receive(msg)
@ -269,12 +252,10 @@ describe GRPC::ActiveCall do
it 'the returns an enumerator that stops after an OK Status' do it 'the returns an enumerator that stops after an OK Status' do
call = make_test_call call = make_test_call
done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, md_tag = ActiveCall.client_invoke(call, @client_queue, deadline)
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through, client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline, @pass_through, deadline,
read_metadata_tag: meta_tag, metadata_tag: md_tag)
finished_tag: done_tag)
msg = 'message is a string' msg = 'message is a string'
reply = 'server_response' reply = 'server_response'
client_call.remote_send(msg) client_call.remote_send(msg)
@ -294,12 +275,10 @@ describe GRPC::ActiveCall do
describe '#writes_done' do describe '#writes_done' do
it 'finishes ok if the server sends a status response' do it 'finishes ok if the server sends a status response' do
call = make_test_call call = make_test_call
done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, md_tag = ActiveCall.client_invoke(call, @client_queue, deadline)
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through, client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline, @pass_through, deadline,
finished_tag: done_tag, metadata_tag: md_tag)
read_metadata_tag: meta_tag)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
expect { client_call.writes_done(false) }.to_not raise_error expect { client_call.writes_done(false) }.to_not raise_error
@ -312,12 +291,10 @@ describe GRPC::ActiveCall do
it 'finishes ok if the server sends an early status response' do it 'finishes ok if the server sends an early status response' do
call = make_test_call call = make_test_call
done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, md_tag = ActiveCall.client_invoke(call, @client_queue, deadline)
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through, client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline, @pass_through, deadline,
read_metadata_tag: meta_tag, metadata_tag: md_tag)
finished_tag: done_tag)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
server_call = expect_server_to_receive(msg) server_call = expect_server_to_receive(msg)
@ -330,12 +307,10 @@ describe GRPC::ActiveCall do
it 'finishes ok if writes_done is true' do it 'finishes ok if writes_done is true' do
call = make_test_call call = make_test_call
done_tag, meta_tag = ActiveCall.client_invoke(call, @client_queue, md_tag = ActiveCall.client_invoke(call, @client_queue, deadline)
deadline)
client_call = ActiveCall.new(call, @client_queue, @pass_through, client_call = ActiveCall.new(call, @client_queue, @pass_through,
@pass_through, deadline, @pass_through, deadline,
read_metadata_tag: meta_tag, metadata_tag: md_tag)
finished_tag: done_tag)
msg = 'message is a string' msg = 'message is a string'
client_call.remote_send(msg) client_call.remote_send(msg)
server_call = expect_server_to_receive(msg) server_call = expect_server_to_receive(msg)
@ -353,21 +328,20 @@ describe GRPC::ActiveCall do
  end

  def expect_server_to_be_invoked(**kw)
    recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
    expect(recvd_rpc).to_not eq nil
    recvd_call = recvd_rpc.call
    recvd_call.run_batch(@server_queue, @server_tag, deadline,
                         CallOps::SEND_INITIAL_METADATA => kw)
    ActiveCall.new(recvd_call, @server_queue, @pass_through,
                   @pass_through, deadline)
  end

  def make_test_call
    @ch.create_call(@client_queue, '/method', 'a.dummy.host', deadline)
  end

  def deadline
    Time.now + 2 # in 2 seconds; arbitrary
  end
end
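The client side of these specs follows a consistent pattern: client_invoke returns a metadata tag that is handed to ActiveCall.new. A hedged sketch of that flow (it assumes a server, as created in the before(:each) block, is accepting calls; the address is a placeholder):

require 'grpc'

client_queue = GRPC::Core::CompletionQueue.new
ch = GRPC::Core::Channel.new('0.0.0.0:10101', nil)
deadline = Time.now + 2
call = ch.create_call(client_queue, '/method', 'a.dummy.host', deadline)

pass_through = proc { |x| x }
md_tag = GRPC::ActiveCall.client_invoke(call, client_queue, deadline, k1: 'v1')
client_call = GRPC::ActiveCall.new(call, client_queue, pass_through,
                                   pass_through, deadline,
                                   metadata_tag: md_tag)
client_call.remote_send('a request')
client_call.writes_done(false)
reply = client_call.remote_read   # blocks until the server responds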

@ -30,15 +30,41 @@
require 'grpc'
require 'xray/thread_dump_signal_handler'

# Notifier is a useful high-level synchronization primitive.
class Notifier
  attr_reader :payload, :notified
  alias_method :notified?, :notified

  def initialize
    @mutex = Mutex.new
    @cvar = ConditionVariable.new
    @notified = false
    @payload = nil
  end

  def wait
    @mutex.synchronize do
      @cvar.wait(@mutex) until notified?
    end
  end

  def notify(payload)
    @mutex.synchronize do
      return Error.new('already notified') if notified?
      @payload = payload
      @notified = true
      @cvar.signal
      return nil
    end
  end
end

def wakey_thread(&blk)
  n = Notifier.new
  t = Thread.new do
    blk.call(n)
  end
  n.wait
  t
end
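Since the Notifier replaces the bare mutex/condition-variable pair, a short usage sketch may help; the RPC handling inside the thread is elided.

th = wakey_thread do |notifier|
  notifier.notify(nil)      # signal readiness; the payload is unused here
  # ... handle one test RPC in this thread ...
end
# wakey_thread returns only after notify has been called, so the thread is
# known to be running at this point.
th.join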
@ -50,8 +76,11 @@ end
include GRPC::Core::StatusCodes
include GRPC::Core::TimeConsts
include GRPC::Core::CallOps

describe 'ClientStub' do
  let(:noop) { proc { |x| x } }

  before(:each) do
    Thread.abort_on_exception = true
    @server = nil
@ -66,61 +95,56 @@ describe 'ClientStub' do
end end
describe '#new' do describe '#new' do
let(:fake_host) { 'localhost:0' }
it 'can be created from a host and args' do it 'can be created from a host and args' do
host = FAKE_HOST
opts = { a_channel_arg: 'an_arg' } opts = { a_channel_arg: 'an_arg' }
blk = proc do blk = proc do
GRPC::ClientStub.new(host, @cq, **opts) GRPC::ClientStub.new(fake_host, @cq, **opts)
end end
expect(&blk).not_to raise_error expect(&blk).not_to raise_error
end end
it 'can be created with a default deadline' do it 'can be created with a default deadline' do
host = FAKE_HOST
opts = { a_channel_arg: 'an_arg', deadline: 5 } opts = { a_channel_arg: 'an_arg', deadline: 5 }
blk = proc do blk = proc do
GRPC::ClientStub.new(host, @cq, **opts) GRPC::ClientStub.new(fake_host, @cq, **opts)
end end
expect(&blk).not_to raise_error expect(&blk).not_to raise_error
end end
it 'can be created with an channel override' do it 'can be created with an channel override' do
host = FAKE_HOST
opts = { a_channel_arg: 'an_arg', channel_override: @ch } opts = { a_channel_arg: 'an_arg', channel_override: @ch }
blk = proc do blk = proc do
GRPC::ClientStub.new(host, @cq, **opts) GRPC::ClientStub.new(fake_host, @cq, **opts)
end end
expect(&blk).not_to raise_error expect(&blk).not_to raise_error
end end
it 'cannot be created with a bad channel override' do it 'cannot be created with a bad channel override' do
host = FAKE_HOST
blk = proc do blk = proc do
opts = { a_channel_arg: 'an_arg', channel_override: Object.new } opts = { a_channel_arg: 'an_arg', channel_override: Object.new }
GRPC::ClientStub.new(host, @cq, **opts) GRPC::ClientStub.new(fake_host, @cq, **opts)
end end
expect(&blk).to raise_error expect(&blk).to raise_error
end end
it 'cannot be created with bad credentials' do it 'cannot be created with bad credentials' do
host = FAKE_HOST
blk = proc do blk = proc do
opts = { a_channel_arg: 'an_arg', creds: Object.new } opts = { a_channel_arg: 'an_arg', creds: Object.new }
GRPC::ClientStub.new(host, @cq, **opts) GRPC::ClientStub.new(fake_host, @cq, **opts)
end end
expect(&blk).to raise_error expect(&blk).to raise_error
end end
it 'can be created with test test credentials' do it 'can be created with test test credentials' do
certs = load_test_certs certs = load_test_certs
host = FAKE_HOST
blk = proc do blk = proc do
opts = { opts = {
GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.fr', GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.fr',
a_channel_arg: 'an_arg', a_channel_arg: 'an_arg',
creds: GRPC::Core::Credentials.new(certs[0], nil, nil) creds: GRPC::Core::Credentials.new(certs[0], nil, nil)
} }
GRPC::ClientStub.new(host, @cq, **opts) GRPC::ClientStub.new(fake_host, @cq, **opts)
end end
expect(&blk).to_not raise_error expect(&blk).to_not raise_error
end end
@ -187,7 +211,7 @@ describe 'ClientStub' do
describe 'without a call operation' do describe 'without a call operation' do
def get_response(stub) def get_response(stub)
stub.request_response(@method, @sent_msg, NOOP, NOOP, stub.request_response(@method, @sent_msg, noop, noop,
k1: 'v1', k2: 'v2') k1: 'v1', k2: 'v2')
end end
@ -196,7 +220,7 @@ describe 'ClientStub' do
describe 'via a call operation' do describe 'via a call operation' do
def get_response(stub) def get_response(stub)
op = stub.request_response(@method, @sent_msg, NOOP, NOOP, op = stub.request_response(@method, @sent_msg, noop, noop,
return_op: true, k1: 'v1', k2: 'v2') return_op: true, k1: 'v1', k2: 'v2')
expect(op).to be_a(GRPC::ActiveCall::Operation) expect(op).to be_a(GRPC::ActiveCall::Operation)
op.execute op.execute
@ -259,7 +283,7 @@ describe 'ClientStub' do
describe 'without a call operation' do describe 'without a call operation' do
def get_response(stub) def get_response(stub)
stub.client_streamer(@method, @sent_msgs, NOOP, NOOP, stub.client_streamer(@method, @sent_msgs, noop, noop,
k1: 'v1', k2: 'v2') k1: 'v1', k2: 'v2')
end end
@ -268,7 +292,7 @@ describe 'ClientStub' do
describe 'via a call operation' do describe 'via a call operation' do
def get_response(stub) def get_response(stub)
op = stub.client_streamer(@method, @sent_msgs, NOOP, NOOP, op = stub.client_streamer(@method, @sent_msgs, noop, noop,
return_op: true, k1: 'v1', k2: 'v2') return_op: true, k1: 'v1', k2: 'v2')
expect(op).to be_a(GRPC::ActiveCall::Operation) expect(op).to be_a(GRPC::ActiveCall::Operation)
op.execute op.execute
@ -333,7 +357,7 @@ describe 'ClientStub' do
describe 'without a call operation' do describe 'without a call operation' do
def get_responses(stub) def get_responses(stub)
e = stub.server_streamer(@method, @sent_msg, NOOP, NOOP, e = stub.server_streamer(@method, @sent_msg, noop, noop,
k1: 'v1', k2: 'v2') k1: 'v1', k2: 'v2')
expect(e).to be_a(Enumerator) expect(e).to be_a(Enumerator)
e e
@ -344,7 +368,7 @@ describe 'ClientStub' do
describe 'via a call operation' do describe 'via a call operation' do
def get_responses(stub) def get_responses(stub)
op = stub.server_streamer(@method, @sent_msg, NOOP, NOOP, op = stub.server_streamer(@method, @sent_msg, noop, noop,
return_op: true, k1: 'v1', k2: 'v2') return_op: true, k1: 'v1', k2: 'v2')
expect(op).to be_a(GRPC::ActiveCall::Operation) expect(op).to be_a(GRPC::ActiveCall::Operation)
e = op.execute e = op.execute
@ -361,34 +385,30 @@ describe 'ClientStub' do
before(:each) do before(:each) do
@sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s } @sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s }
@replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s } @replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s }
server_port = create_test_server
@host = "localhost:#{server_port}"
end end
it 'supports sending all the requests first', bidi: true do it 'supports sending all the requests first', bidi: true do
server_port = create_test_server
host = "localhost:#{server_port}"
th = run_bidi_streamer_handle_inputs_first(@sent_msgs, @replys, th = run_bidi_streamer_handle_inputs_first(@sent_msgs, @replys,
@pass) @pass)
stub = GRPC::ClientStub.new(host, @cq) stub = GRPC::ClientStub.new(@host, @cq)
e = get_responses(stub) e = get_responses(stub)
expect(e.collect { |r| r }).to eq(@replys) expect(e.collect { |r| r }).to eq(@replys)
th.join th.join
end end
it 'supports client-initiated ping pong', bidi: true do it 'supports client-initiated ping pong', bidi: true do
server_port = create_test_server
host = "localhost:#{server_port}"
th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, true) th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, true)
stub = GRPC::ClientStub.new(host, @cq) stub = GRPC::ClientStub.new(@host, @cq)
e = get_responses(stub) e = get_responses(stub)
expect(e.collect { |r| r }).to eq(@sent_msgs) expect(e.collect { |r| r }).to eq(@sent_msgs)
th.join th.join
end end
it 'supports a server-initiated ping pong', bidi: true do it 'supports a server-initiated ping pong', bidi: true do
server_port = create_test_server
host = "localhost:#{server_port}"
th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, false) th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, false)
stub = GRPC::ClientStub.new(host, @cq) stub = GRPC::ClientStub.new(@host, @cq)
e = get_responses(stub) e = get_responses(stub)
expect(e.collect { |r| r }).to eq(@sent_msgs) expect(e.collect { |r| r }).to eq(@sent_msgs)
th.join th.join
@ -397,7 +417,7 @@ describe 'ClientStub' do
describe 'without a call operation' do describe 'without a call operation' do
def get_responses(stub) def get_responses(stub)
e = stub.bidi_streamer(@method, @sent_msgs, NOOP, NOOP) e = stub.bidi_streamer(@method, @sent_msgs, noop, noop)
expect(e).to be_a(Enumerator) expect(e).to be_a(Enumerator)
e e
end end
@ -407,7 +427,7 @@ describe 'ClientStub' do
describe 'via a call operation' do describe 'via a call operation' do
def get_responses(stub) def get_responses(stub)
op = stub.bidi_streamer(@method, @sent_msgs, NOOP, NOOP, op = stub.bidi_streamer(@method, @sent_msgs, noop, noop,
return_op: true) return_op: true)
expect(op).to be_a(GRPC::ActiveCall::Operation) expect(op).to be_a(GRPC::ActiveCall::Operation)
e = op.execute e = op.execute
@ -421,8 +441,8 @@ describe 'ClientStub' do
def run_server_streamer(expected_input, replys, status, **kw) def run_server_streamer(expected_input, replys, status, **kw)
wanted_metadata = kw.clone wanted_metadata = kw.clone
wakey_thread do |mtx, cnd| wakey_thread do |notifier|
c = expect_server_to_be_invoked(mtx, cnd) c = expect_server_to_be_invoked(notifier)
wanted_metadata.each do |k, v| wanted_metadata.each do |k, v|
expect(c.metadata[k.to_s]).to eq(v) expect(c.metadata[k.to_s]).to eq(v)
end end
@ -434,8 +454,8 @@ describe 'ClientStub' do
def run_bidi_streamer_handle_inputs_first(expected_inputs, replys, def run_bidi_streamer_handle_inputs_first(expected_inputs, replys,
status) status)
wakey_thread do |mtx, cnd| wakey_thread do |notifier|
c = expect_server_to_be_invoked(mtx, cnd) c = expect_server_to_be_invoked(notifier)
expected_inputs.each { |i| expect(c.remote_read).to eq(i) } expected_inputs.each { |i| expect(c.remote_read).to eq(i) }
replys.each { |r| c.remote_send(r) } replys.each { |r| c.remote_send(r) }
c.send_status(status, status == @pass ? 'OK' : 'NOK', true) c.send_status(status, status == @pass ? 'OK' : 'NOK', true)
@ -443,8 +463,8 @@ describe 'ClientStub' do
end end
def run_bidi_streamer_echo_ping_pong(expected_inputs, status, client_starts) def run_bidi_streamer_echo_ping_pong(expected_inputs, status, client_starts)
wakey_thread do |mtx, cnd| wakey_thread do |notifier|
c = expect_server_to_be_invoked(mtx, cnd) c = expect_server_to_be_invoked(notifier)
expected_inputs.each do |i| expected_inputs.each do |i|
if client_starts if client_starts
expect(c.remote_read).to eq(i) expect(c.remote_read).to eq(i)
@ -460,8 +480,8 @@ describe 'ClientStub' do
def run_client_streamer(expected_inputs, resp, status, **kw) def run_client_streamer(expected_inputs, resp, status, **kw)
wanted_metadata = kw.clone wanted_metadata = kw.clone
wakey_thread do |mtx, cnd| wakey_thread do |notifier|
c = expect_server_to_be_invoked(mtx, cnd) c = expect_server_to_be_invoked(notifier)
expected_inputs.each { |i| expect(c.remote_read).to eq(i) } expected_inputs.each { |i| expect(c.remote_read).to eq(i) }
wanted_metadata.each do |k, v| wanted_metadata.each do |k, v|
expect(c.metadata[k.to_s]).to eq(v) expect(c.metadata[k.to_s]).to eq(v)
@ -473,8 +493,8 @@ describe 'ClientStub' do
def run_request_response(expected_input, resp, status, **kw) def run_request_response(expected_input, resp, status, **kw)
wanted_metadata = kw.clone wanted_metadata = kw.clone
wakey_thread do |mtx, cnd| wakey_thread do |notifier|
c = expect_server_to_be_invoked(mtx, cnd) c = expect_server_to_be_invoked(notifier)
expect(c.remote_read).to eq(expected_input) expect(c.remote_read).to eq(expected_input)
wanted_metadata.each do |k, v| wanted_metadata.each do |k, v|
expect(c.metadata[k.to_s]).to eq(v) expect(c.metadata[k.to_s]).to eq(v)
@ -490,24 +510,16 @@ describe 'ClientStub' do
    @server.add_http2_port('0.0.0.0:0')
  end

  def expect_server_to_be_invoked(notifier)
    @server.start
    notifier.notify(nil)
    server_tag = Object.new
    recvd_rpc = @server.request_call(@server_queue, server_tag,
                                     INFINITE_FUTURE)
    recvd_call = recvd_rpc.call
    recvd_call.metadata = recvd_rpc.metadata
    recvd_call.run_batch(@server_queue, server_tag, Time.now + 2,
                         SEND_INITIAL_METADATA => nil)
    GRPC::ActiveCall.new(recvd_call, @server_queue, noop, noop, INFINITE_FUTURE)
  end
end
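Putting these helpers together, a request/response test under the new API has roughly the following shape. This is a sketch only: @host and @cq come from the spec's setup, '/an_rpc' is a placeholder method name, and the server body is reduced to a single echo.

noop = proc { |x| x }
th = wakey_thread do |notifier|
  c = expect_server_to_be_invoked(notifier)
  c.remote_send(c.remote_read)                       # echo the request back
  c.send_status(GRPC::Core::StatusCodes::OK, 'OK', true)
end
stub = GRPC::ClientStub.new(@host, @cq)
resp = stub.request_response('/an_rpc', 'ping', noop, noop)  # => 'ping'
th.join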

@ -37,7 +37,6 @@ describe GRPC::RpcDesc do
  INTERNAL = GRPC::Core::StatusCodes::INTERNAL
  UNKNOWN = GRPC::Core::StatusCodes::UNKNOWN
  CallError = GRPC::Core::CallError

  before(:each) do
    @request_response = RpcDesc.new('rr', Object.new, Object.new, 'encode',
@ -63,24 +62,17 @@ describe GRPC::RpcDesc do
it 'sends the specified status if BadStatus is raised' do it 'sends the specified status if BadStatus is raised' do
expect(@call).to receive(:remote_read).once.and_return(Object.new) expect(@call).to receive(:remote_read).once.and_return(Object.new)
expect(@call).to receive(:send_status).once.with(@bs_code, 'NOK') expect(@call).to receive(:send_status).once.with(@bs_code, 'NOK', false)
@request_response.run_server_method(@call, method(:bad_status)) @request_response.run_server_method(@call, method(:bad_status))
end end
it 'sends status UNKNOWN if other StandardErrors are raised' do it 'sends status UNKNOWN if other StandardErrors are raised' do
expect(@call).to receive(:remote_read).once.and_return(Object.new) expect(@call).to receive(:remote_read).once.and_return(Object.new)
expect(@call).to receive(:send_status) .once.with(UNKNOWN, @no_reason) expect(@call).to receive(:send_status) .once.with(UNKNOWN, @no_reason,
false)
@request_response.run_server_method(@call, method(:other_error)) @request_response.run_server_method(@call, method(:other_error))
end end
it 'absorbs EventError with no further action' do
expect(@call).to receive(:remote_read).once.and_raise(EventError)
blk = proc do
@request_response.run_server_method(@call, method(:fake_reqresp))
end
expect(&blk).to_not raise_error
end
it 'absorbs CallError with no further action' do it 'absorbs CallError with no further action' do
expect(@call).to receive(:remote_read).once.and_raise(CallError) expect(@call).to receive(:remote_read).once.and_raise(CallError)
blk = proc do blk = proc do
@ -93,8 +85,7 @@ describe GRPC::RpcDesc do
req = Object.new req = Object.new
expect(@call).to receive(:remote_read).once.and_return(req) expect(@call).to receive(:remote_read).once.and_return(req)
expect(@call).to receive(:remote_send).once.with(@ok_response) expect(@call).to receive(:remote_send).once.with(@ok_response)
expect(@call).to receive(:send_status).once.with(OK, 'OK') expect(@call).to receive(:send_status).once.with(OK, 'OK', true)
expect(@call).to receive(:finished).once
@request_response.run_server_method(@call, method(:fake_reqresp)) @request_response.run_server_method(@call, method(:fake_reqresp))
end end
end end
@@ -107,23 +98,16 @@ describe GRPC::RpcDesc do
     end

     it 'sends the specified status if BadStatus is raised' do
-      expect(@call).to receive(:send_status).once.with(@bs_code, 'NOK')
+      expect(@call).to receive(:send_status).once.with(@bs_code, 'NOK', false)
       @client_streamer.run_server_method(@call, method(:bad_status_alt))
     end

     it 'sends status UNKNOWN if other StandardErrors are raised' do
-      expect(@call).to receive(:send_status) .once.with(UNKNOWN, @no_reason)
+      expect(@call).to receive(:send_status) .once.with(UNKNOWN, @no_reason,
+                                                        false)
       @client_streamer.run_server_method(@call, method(:other_error_alt))
     end

-    it 'absorbs EventError with no further action' do
-      expect(@call).to receive(:remote_send).once.and_raise(EventError)
-      blk = proc do
-        @client_streamer.run_server_method(@call, method(:fake_clstream))
-      end
-      expect(&blk).to_not raise_error
-    end
-
     it 'absorbs CallError with no further action' do
       expect(@call).to receive(:remote_send).once.and_raise(CallError)
       blk = proc do
@@ -134,8 +118,7 @@ describe GRPC::RpcDesc do
     it 'sends a response and closes the stream if there no errors' do
       expect(@call).to receive(:remote_send).once.with(@ok_response)
-      expect(@call).to receive(:send_status).once.with(OK, 'OK')
-      expect(@call).to receive(:finished).once
+      expect(@call).to receive(:send_status).once.with(OK, 'OK', true)
       @client_streamer.run_server_method(@call, method(:fake_clstream))
     end
   end
@@ -149,24 +132,17 @@ describe GRPC::RpcDesc do
     it 'sends the specified status if BadStatus is raised' do
       expect(@call).to receive(:remote_read).once.and_return(Object.new)
-      expect(@call).to receive(:send_status).once.with(@bs_code, 'NOK')
+      expect(@call).to receive(:send_status).once.with(@bs_code, 'NOK', false)
       @server_streamer.run_server_method(@call, method(:bad_status))
     end

     it 'sends status UNKNOWN if other StandardErrors are raised' do
       expect(@call).to receive(:remote_read).once.and_return(Object.new)
-      expect(@call).to receive(:send_status) .once.with(UNKNOWN, @no_reason)
+      expect(@call).to receive(:send_status) .once.with(UNKNOWN, @no_reason,
+                                                        false)
       @server_streamer.run_server_method(@call, method(:other_error))
     end

-    it 'absorbs EventError with no further action' do
-      expect(@call).to receive(:remote_read).once.and_raise(EventError)
-      blk = proc do
-        @server_streamer.run_server_method(@call, method(:fake_svstream))
-      end
-      expect(&blk).to_not raise_error
-    end
-
     it 'absorbs CallError with no further action' do
       expect(@call).to receive(:remote_read).once.and_raise(CallError)
       blk = proc do
@@ -179,8 +155,7 @@ describe GRPC::RpcDesc do
       req = Object.new
       expect(@call).to receive(:remote_read).once.and_return(req)
       expect(@call).to receive(:remote_send).twice.with(@ok_response)
-      expect(@call).to receive(:send_status).once.with(OK, 'OK')
-      expect(@call).to receive(:finished).once
+      expect(@call).to receive(:send_status).once.with(OK, 'OK', true)
       @server_streamer.run_server_method(@call, method(:fake_svstream))
     end
   end
@@ -197,20 +172,20 @@ describe GRPC::RpcDesc do
     it 'sends the specified status if BadStatus is raised' do
       e = GRPC::BadStatus.new(@bs_code, 'NOK')
       expect(@call).to receive(:run_server_bidi).and_raise(e)
-      expect(@call).to receive(:send_status).once.with(@bs_code, 'NOK')
+      expect(@call).to receive(:send_status).once.with(@bs_code, 'NOK', false)
       @bidi_streamer.run_server_method(@call, method(:bad_status_alt))
     end

     it 'sends status UNKNOWN if other StandardErrors are raised' do
       expect(@call).to receive(:run_server_bidi).and_raise(StandardError)
-      expect(@call).to receive(:send_status).once.with(UNKNOWN, @no_reason)
+      expect(@call).to receive(:send_status).once.with(UNKNOWN, @no_reason,
+                                                       false)
       @bidi_streamer.run_server_method(@call, method(:other_error_alt))
     end

     it 'closes the stream if there no errors' do
       expect(@call).to receive(:run_server_bidi)
-      expect(@call).to receive(:send_status).once.with(OK, 'OK')
-      expect(@call).to receive(:finished).once
+      expect(@call).to receive(:send_status).once.with(OK, 'OK', true)
       @bidi_streamer.run_server_method(@call, method(:fake_bidistream))
     end
   end
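Taken together, these RpcDesc hunks also drop every 'absorbs EventError' example along with the EventError constant: with the event queue gone there is no event-level failure to swallow, so only CallError keeps the absorb-and-continue treatment. A rough sketch of the handling the surviving examples describe, in request-response shape; the method name, the warn-based logging, and the BadStatus attribute readers are assumptions, not taken from the diff:

    require 'grpc'

    # Sketch only: succeed with send_status(OK, 'OK', true), convert BadStatus
    # and StandardError into failing statuses, and absorb CallError silently.
    def run_request_response(active_call, handler)
      reply = handler.call(active_call.remote_read)
      active_call.remote_send(reply)
      active_call.send_status(GRPC::Core::StatusCodes::OK, 'OK', true)
    rescue GRPC::Core::CallError => e
      warn("call already terminated: #{e}")  # absorbed; no status is sent
    rescue GRPC::BadStatus => e
      active_call.send_status(e.code, e.details, false)
    rescue StandardError
      active_call.send_status(GRPC::Core::StatusCodes::UNKNOWN,
                              'failed handling the call on the server', false)
    end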

@@ -364,7 +364,7 @@ describe GRPC::RpcServer do
       @srv.wait_till_running
       req = EchoMsg.new
       stub = SlowStub.new(@host, **@client_opts)
-      deadline = service.delay + 0.5 # wait for long enough
+      deadline = service.delay + 1.0 # wait for long enough
       expect(stub.an_rpc(req, deadline, k1: 'v1', k2: 'v2')).to be_a(EchoMsg)
       wanted_md = [{ 'k1' => 'v1', 'k2' => 'v2' }]
       expect(service.received_md).to eq(wanted_md)

@@ -1,64 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-require 'grpc'
-
-describe GRPC::Core::Metadata do
-  describe '#new' do
-    it 'should create instances' do
-      expect { GRPC::Core::Metadata.new('a key', 'a value') }.to_not raise_error
-    end
-  end
-
-  describe '#key' do
-    md = GRPC::Core::Metadata.new('a key', 'a value')
-    it 'should be the constructor value' do
-      expect(md.key).to eq('a key')
-    end
-  end
-
-  describe '#value' do
-    md = GRPC::Core::Metadata.new('a key', 'a value')
-    it 'should be the constuctor value' do
-      expect(md.value).to eq('a value')
-    end
-  end
-
-  describe '#dup' do
-    it 'should create a copy that returns the correct key' do
-      md = GRPC::Core::Metadata.new('a key', 'a value')
-      expect(md.dup.key).to eq('a key')
-    end
-
-    it 'should create a copy that returns the correct value' do
-      md = GRPC::Core::Metadata.new('a key', 'a value')
-      expect(md.dup.value).to eq('a value')
-    end
-  end
-end
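The final hunk deletes metadata_spec.rb outright. With the batch call API the specs above already treat metadata as plain Ruby hashes (see the k1/v1 expectations in the RpcServer example), so a dedicated GRPC::Core::Metadata wrapper has nothing left to test. A trivial sketch of the hash-based shape those expectations use, included only to make the contrast concrete:

    # Metadata now travels as ordinary Hashes, matching the RpcServer spec:
    # the client passes it as keyword arguments on the stub call, e.g.
    #   stub.an_rpc(req, deadline, k1: 'v1', k2: 'v2')
    # and the service sees it back as a string-keyed Hash.
    sent_md     = { 'k1' => 'v1', 'k2' => 'v2' }
    received_md = [{ 'k1' => 'v1', 'k2' => 'v2' }]   # what the spec expects
    raise 'metadata mismatch' unless received_md == [sent_md]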