Merge branch 'master' into tag_set

Ref: pull/4750/head
Author: Alistair Veitch (9 years ago)
Commit: f190601e60
Changed files (first 100 shown; changed line count in parentheses):

1. .gitignore (9)
2. MANIFEST.md (14)
3. Makefile (475)
4. PYTHON-MANIFEST.in (10)
5. README.md (14)
6. binding.gyp (13)
7. build.yaml (68)
8. doc/connection-backoff-interop-test-description.md (2)
9. include/grpc++/channel.h (10)
10. include/grpc++/client_context.h (14)
11. include/grpc++/completion_queue.h (10)
12. include/grpc++/impl/call.h (10)
13. include/grpc++/server_context.h (10)
14. include/grpc++/support/async_stream.h (2)
15. include/grpc++/support/slice.h (4)
16. include/grpc/census.h (25)
17. include/grpc/compression.h (2)
18. include/grpc/grpc.h (18)
19. include/grpc/support/atm.h (2)
20. include/grpc/support/atm_gcc_atomic.h (10)
21. include/grpc/support/atm_gcc_sync.h (2)
22. include/grpc/support/atm_win32.h (2)
23. include/grpc/support/histogram.h (6)
24. include/grpc/support/port_platform.h (16)
25. include/grpc/support/slice.h (10)
26. include/grpc/support/slice_buffer.h (2)
27. include/grpc/support/sync.h (6)
28. include/grpc/support/thd.h (2)
29. include/grpc/support/time.h (6)
30. include/grpc/support/tls.h (2)
31. include/grpc/support/tls_gcc.h (2)
32. include/grpc/support/tls_msvc.h (2)
33. include/grpc/support/tls_pthread.h (4)
34. requirements.txt (1)
35. setup.cfg (2)
36. setup.py (74)
37. src/core/census/grpc_filter.c (2)
38. src/core/census/operation.c (4)
39. src/core/census/rpc_metric_id.h (12)
40. src/core/census/tracing.c (2)
41. src/core/channel/channel_stack.c (2)
42. src/core/channel/client_channel.c (9)
43. src/core/channel/compress_filter.c (10)
44. src/core/channel/http_server_filter.c (14)
45. src/core/channel/subchannel_call_holder.c (4)
46. src/core/client_config/lb_policies/pick_first.c (62)
47. src/core/client_config/resolvers/sockaddr_resolver.c (4)
48. src/core/client_config/subchannel.c (20)
49. src/core/httpcli/format_request.c (2)
50. src/core/httpcli/httpcli_security_connector.c (5)
51. src/core/httpcli/parser.c (14)
52. src/core/httpcli/parser.h (2)
53. src/core/iomgr/closure.c (6)
54. src/core/iomgr/closure.h (2)
55. src/core/iomgr/exec_ctx.c (2)
56. src/core/iomgr/fd_posix.c (41)
57. src/core/iomgr/fd_posix.h (11)
58. src/core/iomgr/iocp_windows.c (2)
59. src/core/iomgr/pollset_multipoller_with_epoll.c (67)
60. src/core/iomgr/pollset_multipoller_with_poll_posix.c (6)
61. src/core/iomgr/pollset_posix.c (15)
62. src/core/iomgr/pollset_posix.h (6)
63. src/core/iomgr/sockaddr_utils.c (12)
64. src/core/iomgr/tcp_posix.c (8)
65. src/core/iomgr/tcp_posix.h (8)
66. src/core/iomgr/tcp_server_posix.c (2)
67. src/core/iomgr/tcp_server_windows.c (2)
68. src/core/iomgr/timer.c (10)
69. src/core/iomgr/timer.h (2)
70. src/core/iomgr/timer_heap.c (20)
71. src/core/iomgr/timer_heap.h (4)
72. src/core/iomgr/udp_server.c (2)
73. src/core/json/json_reader.c (23)
74. src/core/json/json_reader.h (8)
75. src/core/json/json_string.c (38)
76. src/core/json/json_writer.c (16)
77. src/core/profiling/basic_timers.c (2)
78. src/core/security/base64.c (18)
79. src/core/security/client_auth_filter.c (8)
80. src/core/security/credentials.c (4)
81. src/core/security/json_token.c (6)
82. src/core/security/jwt_verifier.c (4)
83. src/core/security/secure_endpoint.c (22)
84. src/core/security/security_connector.c (19)
85. src/core/security/security_connector.h (7)
86. src/core/statistics/census_interface.h (4)
87. src/core/statistics/census_log.c (29)
88. src/core/statistics/census_rpc_stats.c (4)
89. src/core/statistics/census_rpc_stats.h (6)
90. src/core/statistics/census_tracing.c (6)
91. src/core/statistics/hash_table.c (22)
92. src/core/statistics/hash_table.h (8)
93. src/core/statistics/window_stats.c (27)
94. src/core/support/alloc.c (2)
95. src/core/support/cpu_posix.c (2)
96. src/core/support/histogram.c (10)
97. src/core/support/log_posix.c (2)
98. src/core/support/murmur_hash.c (22)
99. src/core/support/murmur_hash.h (2)
100. src/core/support/slice.c (22)

Some files are not shown because too many files changed in this diff.

.gitignore (9 lines changed, vendored)

@ -4,8 +4,13 @@ gens
libs
objs
# Python virtual environments
python*_virtual_environment
# Python items
.coverage*
.eggs
.tox
htmlcov/
dist/
*.egg
# gcov coverage data
reports

@ -0,0 +1,14 @@
# Top-level Items by language
## Node
* [binding.gyp](binding.gyp)
## Objective-C
* [gRPC.podspec](gRPC.podspec)
## Python
* [requirements.txt](requirements.txt)
* [setup.cfg](setup.cfg)
* [setup.py](setup.py)
* [tox.ini](tox.ini)
* [PYTHON-MANIFEST.in](PYTHON-MANIFEST.in)

Makefile: file diff suppressed because it is too large.

@ -0,0 +1,10 @@
graft src/python/grpcio/grpc
graft src/python/grpcio/tests
graft src/core
graft include/grpc
graft third_party/boringssl
include src/python/grpcio/commands.py
include src/python/grpcio/grpc_core_dependencies.py
include src/python/grpcio/README.rst
include requirements.txt
include etc/roots.pem

@ -11,16 +11,16 @@ You can find more detailed documentation and examples in the [doc](doc) and [exa
#Installation
See grpc/INSTALL for installation instructions for various platforms.
See [grpc/INSTALL](INSTALL) for installation instructions for various platforms.
#Repository Structure & Status
This repository contains source code for gRPC libraries for multiple languages written on top of shared C core library [src/core] (src/core).
Libraries in different languages are in different state of development. We are seeking contributions for all of these libraries.
Libraries in different languages are in different states of development. We are seeking contributions for all of these libraries.
| Language | Source | Status |
|-------------------------|-------------------------------------|---------------------------------|
| Language | Source | Status |
|-------------------------|-------------------------------------|----------------------------------|
| Shared C [core library] | [src/core] (src/core) | Beta - the surface API is stable |
| C++ | [src/cpp] (src/cpp) | Beta - the surface API is stable |
| Ruby | [src/ruby] (src/ruby) | Beta - the surface API is stable |
@ -31,10 +31,12 @@ Libraries in different languages are in different state of development. We are s
| Objective-C | [src/objective-c] (src/objective-c) | Beta - the surface API is stable |
<small>
Java source code is in [grpc-java] (http://github.com/grpc/grpc-java) repository.
Go source code is in [grpc-go] (http://github.com/grpc/grpc-go) repository.
Java source code is in the [grpc-java] (http://github.com/grpc/grpc-java) repository.
Go source code is in the [grpc-go] (http://github.com/grpc/grpc-go) repository.
</small>
See [MANIFEST.md](MANIFEST.md) for a listing of top-level items in the
repository.
#Overview

@ -88,6 +88,11 @@
},
'targets': [
{
'cflags': [
'-std=c99',
'-Wall',
'-Werror'
],
'target_name': 'gpr',
'product_prefix': 'lib',
'type': 'static_library',
@ -145,6 +150,11 @@
],
},
{
'cflags': [
'-std=c99',
'-Wall',
'-Werror'
],
'target_name': 'grpc',
'product_prefix': 'lib',
'type': 'static_library',
@ -317,7 +327,7 @@
"<!(node -e \"require('nan')\")"
],
'cflags': [
'-std=c++0x',
'-std=c++11',
'-Wall',
'-pthread',
'-g',
@ -333,7 +343,6 @@
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9',
'OTHER_CFLAGS': [
'-std=c++11',
'-stdlib=libc++'
]
}

@ -642,9 +642,9 @@ libs:
- test/cpp/util/string_ref_helper.h
- test/cpp/util/subprocess.h
src:
- test/cpp/util/messages.proto
- test/cpp/util/echo.proto
- test/cpp/util/echo_duplicate.proto
- src/proto/grpc/testing/echo_messages.proto
- src/proto/grpc/testing/echo.proto
- src/proto/grpc/testing/duplicate/echo_duplicate.proto
- test/cpp/util/cli_call.cc
- test/cpp/util/create_test_channel.cc
- test/cpp/util/string_ref_helper.cc
@ -700,7 +700,7 @@ libs:
headers:
- test/cpp/interop/client_helper.h
src:
- test/proto/messages.proto
- src/proto/grpc/testing/messages.proto
- test/cpp/interop/client_helper.cc
deps:
- grpc++_test_util
@ -714,9 +714,9 @@ libs:
headers:
- test/cpp/interop/interop_client.h
src:
- test/proto/empty.proto
- test/proto/messages.proto
- test/proto/test.proto
- src/proto/grpc/testing/empty.proto
- src/proto/grpc/testing/messages.proto
- src/proto/grpc/testing/test.proto
- test/cpp/interop/client.cc
- test/cpp/interop/interop_client.cc
deps:
@ -744,9 +744,9 @@ libs:
build: private
language: c++
src:
- test/proto/empty.proto
- test/proto/messages.proto
- test/proto/test.proto
- src/proto/grpc/testing/empty.proto
- src/proto/grpc/testing/messages.proto
- src/proto/grpc/testing/test.proto
- test/cpp/interop/server.cc
deps:
- interop_server_helper
@ -773,12 +773,12 @@ libs:
- test/cpp/qps/timer.h
- test/cpp/util/benchmark_config.h
src:
- test/proto/messages.proto
- test/proto/benchmarks/control.proto
- test/proto/benchmarks/payloads.proto
- test/proto/benchmarks/services.proto
- test/proto/benchmarks/stats.proto
- test/cpp/qps/perf_db.proto
- src/proto/grpc/testing/messages.proto
- src/proto/grpc/testing/control.proto
- src/proto/grpc/testing/payloads.proto
- src/proto/grpc/testing/services.proto
- src/proto/grpc/testing/stats.proto
- src/proto/grpc/testing/perf_db.proto
- test/cpp/qps/client_async.cc
- test/cpp/qps/client_sync.cc
- test/cpp/qps/driver.cc
@ -2097,7 +2097,7 @@ targets:
headers:
- test/cpp/util/metrics_server.h
src:
- test/proto/metrics.proto
- src/proto/grpc/testing/metrics.proto
- test/cpp/interop/metrics_client.cc
deps:
- grpc++
@ -2209,9 +2209,9 @@ targets:
run: false
language: c++
src:
- test/proto/empty.proto
- test/proto/messages.proto
- test/proto/test.proto
- src/proto/grpc/testing/empty.proto
- src/proto/grpc/testing/messages.proto
- src/proto/grpc/testing/test.proto
- test/cpp/interop/reconnect_interop_client.cc
deps:
- grpc++_test_util
@ -2226,9 +2226,9 @@ targets:
run: false
language: c++
src:
- test/proto/empty.proto
- test/proto/messages.proto
- test/proto/test.proto
- src/proto/grpc/testing/empty.proto
- src/proto/grpc/testing/messages.proto
- src/proto/grpc/testing/test.proto
- test/cpp/interop/reconnect_interop_server.cc
deps:
- reconnect_server
@ -2347,10 +2347,10 @@ targets:
- test/cpp/interop/stress_interop_client.h
- test/cpp/util/metrics_server.h
src:
- test/proto/empty.proto
- test/proto/messages.proto
- test/proto/metrics.proto
- test/proto/test.proto
- src/proto/grpc/testing/empty.proto
- src/proto/grpc/testing/messages.proto
- src/proto/grpc/testing/metrics.proto
- src/proto/grpc/testing/test.proto
- test/cpp/interop/interop_client.cc
- test/cpp/interop/stress_interop_client.cc
- test/cpp/interop/stress_test.cc
@ -2414,6 +2414,7 @@ targets:
run: false
language: c++
src:
- src/proto/grpc/testing/echo.proto
- test/cpp/end2end/zookeeper_test.cc
deps:
- grpc++_test_util
@ -2427,6 +2428,14 @@ targets:
- zookeeper
platforms:
- linux
- name: public_headers_must_be_c89
build: test
language: c89
src:
- test/core/surface/public_headers_must_be_c89.c
deps:
- grpc
- gpr
vspackages:
- linkage: static
name: grpc.dependencies.zlib
@ -2478,3 +2487,8 @@ node_modules:
- src/node/ext/server.cc
- src/node/ext/server_credentials.cc
- src/node/ext/timeval.cc
python_dependencies:
deps:
- grpc
- gpr
- boringssl

@ -18,7 +18,7 @@ which translates to about 13 retries.
are conforming the spec or do its own check on the backoffs in the response.
Client and server use
[test.proto](https://github.com/grpc/grpc/blob/master/test/proto/test.proto).
[test.proto](https://github.com/grpc/grpc/blob/master/src/proto/grpc/testing/test.proto).
Each language should implement its own client. The C++ server is shared among
languages.
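
The "about 13 retries" figure in the description above follows from the exponential-backoff arithmetic. A minimal sketch of that calculation, assuming the connection-backoff spec parameters (1 s initial backoff, 1.6 multiplier, 120 s cap) and a 540-second test window; none of these constants appear in this excerpt, so treat them as assumptions:

```c
/* Hedged sketch: count how many reconnect attempts fit in an assumed
 * 540-second test window with assumed backoff parameters. */
#include <stdio.h>

int main(void) {
  double backoff = 1.0; /* assumed initial backoff (seconds) */
  double elapsed = 0.0;
  int retries = 0;
  while (elapsed + backoff <= 540.0) { /* assumed test duration */
    elapsed += backoff;
    retries++;
    backoff *= 1.6;                       /* assumed multiplier */
    if (backoff > 120.0) backoff = 120.0; /* assumed cap */
  }
  printf("%d retries in %.0f seconds\n", retries, elapsed); /* ~13 in ~530 s */
  return 0;
}
```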

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -54,13 +54,13 @@ template <class R>
class ClientReader;
template <class W>
class ClientWriter;
template <class R, class W>
template <class W, class R>
class ClientReaderWriter;
template <class R>
class ClientAsyncReader;
template <class W>
class ClientAsyncWriter;
template <class R, class W>
template <class W, class R>
class ClientAsyncReaderWriter;
template <class R>
class ClientAsyncResponseReader;
@ -98,13 +98,13 @@ class Channel GRPC_FINAL : public GrpcLibrary,
friend class ::grpc::ClientReader;
template <class W>
friend class ::grpc::ClientWriter;
template <class R, class W>
template <class W, class R>
friend class ::grpc::ClientReaderWriter;
template <class R>
friend class ::grpc::ClientAsyncReader;
template <class W>
friend class ::grpc::ClientAsyncWriter;
template <class R, class W>
template <class W, class R>
friend class ::grpc::ClientAsyncReaderWriter;
template <class R>
friend class ::grpc::ClientAsyncResponseReader;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -76,13 +76,13 @@ template <class R>
class ClientReader;
template <class W>
class ClientWriter;
template <class R, class W>
template <class W, class R>
class ClientReaderWriter;
template <class R>
class ClientAsyncReader;
template <class W>
class ClientAsyncWriter;
template <class R, class W>
template <class W, class R>
class ClientAsyncReaderWriter;
template <class R>
class ClientAsyncResponseReader;
@ -137,10 +137,10 @@ class PropagationOptions {
return *this;
}
gpr_uint32 c_bitmask() const { return propagate_; }
uint32_t c_bitmask() const { return propagate_; }
private:
gpr_uint32 propagate_;
uint32_t propagate_;
};
namespace testing {
@ -304,13 +304,13 @@ class ClientContext {
friend class ::grpc::ClientReader;
template <class W>
friend class ::grpc::ClientWriter;
template <class R, class W>
template <class W, class R>
friend class ::grpc::ClientReaderWriter;
template <class R>
friend class ::grpc::ClientAsyncReader;
template <class W>
friend class ::grpc::ClientAsyncWriter;
template <class R, class W>
template <class W, class R>
friend class ::grpc::ClientAsyncReaderWriter;
template <class R>
friend class ::grpc::ClientAsyncResponseReader;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -49,13 +49,13 @@ template <class R>
class ClientReader;
template <class W>
class ClientWriter;
template <class R, class W>
template <class W, class R>
class ClientReaderWriter;
template <class R>
class ServerReader;
template <class W>
class ServerWriter;
template <class R, class W>
template <class W, class R>
class ServerReaderWriter;
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
@ -151,13 +151,13 @@ class CompletionQueue : public GrpcLibrary {
friend class ::grpc::ClientReader;
template <class W>
friend class ::grpc::ClientWriter;
template <class R, class W>
template <class W, class R>
friend class ::grpc::ClientReaderWriter;
template <class R>
friend class ::grpc::ServerReader;
template <class W>
friend class ::grpc::ServerWriter;
template <class R, class W>
template <class W, class R>
friend class ::grpc::ServerReaderWriter;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;

@ -70,7 +70,7 @@ class WriteOptions {
inline void Clear() { flags_ = 0; }
/// Returns raw flags bitset.
inline gpr_uint32 flags() const { return flags_; }
inline uint32_t flags() const { return flags_; }
/// Sets flag for the disabling of compression for the next message write.
///
@ -126,13 +126,13 @@ class WriteOptions {
}
private:
void SetBit(const gpr_uint32 mask) { flags_ |= mask; }
void SetBit(const uint32_t mask) { flags_ |= mask; }
void ClearBit(const gpr_uint32 mask) { flags_ &= ~mask; }
void ClearBit(const uint32_t mask) { flags_ &= ~mask; }
bool GetBit(const gpr_uint32 mask) const { return (flags_ & mask) != 0; }
bool GetBit(const uint32_t mask) const { return (flags_ & mask) != 0; }
gpr_uint32 flags_;
uint32_t flags_;
};
/// Default argument for CallOpSet. I is unused by the class, but can be

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -58,13 +58,13 @@ template <class W>
class ServerAsyncWriter;
template <class W>
class ServerAsyncResponseWriter;
template <class R, class W>
template <class W, class R>
class ServerAsyncReaderWriter;
template <class R>
class ServerReader;
template <class W>
class ServerWriter;
template <class R, class W>
template <class W, class R>
class ServerReaderWriter;
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
@ -145,13 +145,13 @@ class ServerContext {
friend class ::grpc::ServerAsyncWriter;
template <class W>
friend class ::grpc::ServerAsyncResponseWriter;
template <class R, class W>
template <class W, class R>
friend class ::grpc::ServerAsyncReaderWriter;
template <class R>
friend class ::grpc::ServerReader;
template <class W>
friend class ::grpc::ServerWriter;
template <class R, class W>
template <class W, class R>
friend class ::grpc::ServerReaderWriter;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -72,10 +72,10 @@ class Slice GRPC_FINAL {
size_t size() const { return GPR_SLICE_LENGTH(slice_); }
/// Raw pointer to the beginning (first element) of the slice.
const gpr_uint8* begin() const { return GPR_SLICE_START_PTR(slice_); }
const uint8_t* begin() const { return GPR_SLICE_START_PTR(slice_); }
/// Raw pointer to the end (one byte \em past the last element) of the slice.
const gpr_uint8* end() const { return GPR_SLICE_END_PTR(slice_); }
const uint8_t* end() const { return GPR_SLICE_END_PTR(slice_); }
private:
friend class ByteBuffer;

@ -166,8 +166,8 @@ census_timestamp census_start_rpc_op_timestamp(void);
functions, maybe it should be set once at census initialization.
*/
typedef struct {
const char *(*get_rpc_service_name)(gpr_int64 id);
const char *(*get_rpc_method_name)(gpr_int64 id);
const char *(*get_rpc_service_name)(int64_t id);
const char *(*get_rpc_method_name)(int64_t id);
} census_rpc_name_info;
/**
@ -205,7 +205,7 @@ typedef struct {
@return A new census context.
*/
census_context *census_start_client_rpc_op(
const census_context *context, gpr_int64 rpc_name_id,
const census_context *context, int64_t rpc_name_id,
const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
const census_timestamp *start_time);
@ -233,7 +233,7 @@ void census_set_rpc_client_peer(census_context *context, const char *peer);
@return A new census context.
*/
census_context *census_start_server_rpc_op(
const char *buffer, gpr_int64 rpc_name_id,
const char *buffer, int64_t rpc_name_id,
const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
census_timestamp *start_time);
@ -276,8 +276,8 @@ census_context *census_start_op(census_context *context, const char *family,
*/
void census_end_op(census_context *context, int status);
#define CENSUS_TRACE_RECORD_START_OP ((gpr_uint32)0)
#define CENSUS_TRACE_RECORD_END_OP ((gpr_uint32)1)
#define CENSUS_TRACE_RECORD_START_OP ((uint32_t)0)
#define CENSUS_TRACE_RECORD_END_OP ((uint32_t)1)
/** Insert a trace record into the trace stream. The record consists of an
arbitrary size buffer, the size of which is provided in 'n'.
@ -286,15 +286,15 @@ void census_end_op(census_context *context, int status);
@param buffer Pointer to buffer to use
@param n Number of bytes in buffer
*/
void census_trace_print(census_context *context, gpr_uint32 type,
void census_trace_print(census_context *context, uint32_t type,
const char *buffer, size_t n);
/** Trace record. */
typedef struct {
census_timestamp timestamp; /* Time of record creation */
gpr_uint64 trace_id; /* Trace ID associated with record */
gpr_uint64 op_id; /* Operation ID associated with record */
gpr_uint32 type; /* Type (as used in census_trace_print() */
uint64_t trace_id; /* Trace ID associated with record */
uint64_t op_id; /* Operation ID associated with record */
uint32_t type; /* Type (as used in census_trace_print() */
const char *buffer; /* Buffer (from census_trace_print() */
size_t buf_size; /* Number of bytes inside buffer */
} census_trace_record;
@ -413,7 +413,7 @@ census_tag_set *census_context_tag_set(census_context *context);
/* A single value to be recorded comprises two parts: an ID for the particular
* metric and the value to be recorded against it. */
typedef struct {
gpr_uint32 metric_id;
uint32_t metric_id;
double value;
} census_value;
@ -449,8 +449,7 @@ typedef struct census_view census_view;
@return A new census view
*/
census_view *census_view_create(gpr_uint32 metric_id,
const census_tag_set *tags,
census_view *census_view_create(uint32_t metric_id, const census_tag_set *tags,
const census_aggregation *aggregations,
size_t naggregations);
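
The include/grpc/census.h hunks above are part of the commit-wide move from gpr_* integer typedefs to <stdint.h> types. A minimal usage sketch of the retyped census_trace_print, based only on the signature and the CENSUS_TRACE_RECORD_START_OP constant shown above; the context argument and the payload are illustrative assumptions:

```c
/* Hedged sketch: attach a small trace blob to a census context. */
#include <string.h>
#include <grpc/census.h>

static void record_marker(census_context *ctx) {
  const char *note = "cache-miss"; /* arbitrary application payload */
  /* Any application-chosen uint32_t tag works; the header's START_OP
   * constant is reused here purely as an example. */
  census_trace_print(ctx, CENSUS_TRACE_RECORD_START_OP, note, strlen(note));
}
```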

@ -64,7 +64,7 @@ typedef enum {
} grpc_compression_level;
typedef struct grpc_compression_options {
gpr_uint32 enabled_algorithms_bitset; /**< All algs are enabled by default */
uint32_t enabled_algorithms_bitset; /**< All algs are enabled by default */
grpc_compression_algorithm default_compression_algorithm; /**< for channel */
} grpc_compression_options;

@ -222,7 +222,7 @@ typedef struct grpc_metadata {
const char *key;
const char *value;
size_t value_length;
gpr_uint32 flags;
uint32_t flags;
/** The following fields are reserved for grpc internal use.
There is no need to initialize them, and they will be set to garbage
@ -326,7 +326,7 @@ typedef struct grpc_op {
/** Operation type, as defined by grpc_op_type */
grpc_op_type op;
/** Write flags bitset for grpc_begin_messages */
gpr_uint32 flags;
uint32_t flags;
/** Reserved for future usage */
void *reserved;
union {
@ -408,12 +408,12 @@ void grpc_register_plugin(void (*init)(void), void (*destroy)(void));
/* Propagation bits: this can be bitwise or-ed to form propagation_mask for
* grpc_call */
/** Propagate deadline */
#define GRPC_PROPAGATE_DEADLINE ((gpr_uint32)1)
#define GRPC_PROPAGATE_DEADLINE ((uint32_t)1)
/** Propagate census context */
#define GRPC_PROPAGATE_CENSUS_STATS_CONTEXT ((gpr_uint32)2)
#define GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT ((gpr_uint32)4)
#define GRPC_PROPAGATE_CENSUS_STATS_CONTEXT ((uint32_t)2)
#define GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT ((uint32_t)4)
/** Propagate cancellation */
#define GRPC_PROPAGATE_CANCELLATION ((gpr_uint32)8)
#define GRPC_PROPAGATE_CANCELLATION ((uint32_t)8)
/* Default propagation mask: clients of the core API are encouraged to encode
deltas from this in their implementations... ie write:
@ -421,7 +421,7 @@ void grpc_register_plugin(void (*init)(void), void (*destroy)(void));
propagation. Doing so gives flexibility in the future to define new
propagation types that are default inherited or not. */
#define GRPC_PROPAGATE_DEFAULTS \
((gpr_uint32)(( \
((uint32_t)(( \
0xffff | GRPC_PROPAGATE_DEADLINE | GRPC_PROPAGATE_CENSUS_STATS_CONTEXT | \
GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT | GRPC_PROPAGATE_CANCELLATION)))
@ -526,7 +526,7 @@ void grpc_channel_watch_connectivity_state(
*/
grpc_call *grpc_channel_create_call(grpc_channel *channel,
grpc_call *parent_call,
gpr_uint32 propagation_mask,
uint32_t propagation_mask,
grpc_completion_queue *completion_queue,
const char *method, const char *host,
gpr_timespec deadline, void *reserved);
@ -542,7 +542,7 @@ void *grpc_channel_register_call(grpc_channel *channel, const char *method,
/** Create a call given a handle returned from grpc_channel_register_call */
grpc_call *grpc_channel_create_registered_call(
grpc_channel *channel, grpc_call *parent_call, gpr_uint32 propagation_mask,
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *completion_queue, void *registered_call_handle,
gpr_timespec deadline, void *reserved);
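
The header comment above encourages callers to express propagation masks as deltas from GRPC_PROPAGATE_DEFAULTS. A minimal sketch of that pattern against the retyped grpc_channel_create_call; the method path, host, and the surrounding channel/queue handles are placeholders, not taken from this diff:

```c
/* Hedged sketch: inherit everything from the parent call except cancellation. */
#include <grpc/grpc.h>
#include <grpc/support/time.h>

grpc_call *start_child_call(grpc_channel *channel, grpc_call *parent,
                            grpc_completion_queue *cq) {
  uint32_t mask = GRPC_PROPAGATE_DEFAULTS & ~GRPC_PROPAGATE_CANCELLATION;
  return grpc_channel_create_call(channel, parent, mask, cq,
                                  "/pkg.Service/Method" /* placeholder */,
                                  "localhost" /* placeholder */,
                                  gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
}
```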

@ -53,7 +53,7 @@
// Atomic operations act on an intergral_type gpr_atm that is guaranteed to
// be the same size as a pointer.
typedef gpr_intptr gpr_atm;
typedef intptr_t gpr_atm;
// A memory barrier, providing both acquire and release semantics, but not
// otherwise acting on memory.

@ -38,21 +38,21 @@
__atomic_* interface. */
#include <grpc/support/port_platform.h>
typedef gpr_intptr gpr_atm;
typedef intptr_t gpr_atm;
#define gpr_atm_full_barrier() (__atomic_thread_fence(__ATOMIC_SEQ_CST))
#define gpr_atm_acq_load(p) (__atomic_load_n((p), __ATOMIC_ACQUIRE))
#define gpr_atm_no_barrier_load(p) (__atomic_load_n((p), __ATOMIC_RELAXED))
#define gpr_atm_rel_store(p, value) \
(__atomic_store_n((p), (gpr_intptr)(value), __ATOMIC_RELEASE))
(__atomic_store_n((p), (intptr_t)(value), __ATOMIC_RELEASE))
#define gpr_atm_no_barrier_store(p, value) \
(__atomic_store_n((p), (gpr_intptr)(value), __ATOMIC_RELAXED))
(__atomic_store_n((p), (intptr_t)(value), __ATOMIC_RELAXED))
#define gpr_atm_no_barrier_fetch_add(p, delta) \
(__atomic_fetch_add((p), (gpr_intptr)(delta), __ATOMIC_RELAXED))
(__atomic_fetch_add((p), (intptr_t)(delta), __ATOMIC_RELAXED))
#define gpr_atm_full_fetch_add(p, delta) \
(__atomic_fetch_add((p), (gpr_intptr)(delta), __ATOMIC_ACQ_REL))
(__atomic_fetch_add((p), (intptr_t)(delta), __ATOMIC_ACQ_REL))
static __inline int gpr_atm_no_barrier_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
return __atomic_compare_exchange_n(p, &o, n, 0, __ATOMIC_RELAXED,

@ -38,7 +38,7 @@
interface */
#include <grpc/support/port_platform.h>
typedef gpr_intptr gpr_atm;
typedef intptr_t gpr_atm;
#define GPR_ATM_COMPILE_BARRIER_() __asm__ __volatile__("" : : : "memory")

@ -37,7 +37,7 @@
/* Win32 variant of atm_platform.h */
#include <grpc/support/port_platform.h>
typedef gpr_intptr gpr_atm;
typedef intptr_t gpr_atm;
#define gpr_atm_full_barrier MemoryBarrier
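
The three atm_* headers above all retype gpr_atm from gpr_intptr to intptr_t while keeping the macro surface unchanged. A minimal sketch of that surface in use, composed only from the operations shown above; the counter/flag scenario itself is illustrative:

```c
/* Hedged sketch: a lock-free counter plus a one-shot claim flag. */
#include <grpc/support/atm.h>

static gpr_atm g_counter = 0;
static gpr_atm g_claimed = 0;

void bump(void) { gpr_atm_full_fetch_add(&g_counter, 1); }

intptr_t read_counter(void) { return gpr_atm_acq_load(&g_counter); }

int try_claim(void) {
  /* returns non-zero for exactly one caller */
  return gpr_atm_no_barrier_cas(&g_claimed, 0, 1);
}
```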

@ -62,10 +62,10 @@ double gpr_histogram_count(gpr_histogram *histogram);
double gpr_histogram_sum(gpr_histogram *histogram);
double gpr_histogram_sum_of_squares(gpr_histogram *histogram);
const gpr_uint32 *gpr_histogram_get_contents(gpr_histogram *histogram,
size_t *count);
const uint32_t *gpr_histogram_get_contents(gpr_histogram *histogram,
size_t *count);
void gpr_histogram_merge_contents(gpr_histogram *histogram,
const gpr_uint32 *data, size_t data_count,
const uint32_t *data, size_t data_count,
double min_seen, double max_seen, double sum,
double sum_of_squares, double count);

@ -314,22 +314,6 @@
#error Must define exactly one of GPR_MSVC_TLS, GPR_GCC_TLS, GPR_PTHREAD_TLS, GPR_CUSTOM_TLS
#endif
typedef int16_t gpr_int16;
typedef int32_t gpr_int32;
typedef int64_t gpr_int64;
typedef uint8_t gpr_uint8;
typedef uint16_t gpr_uint16;
typedef uint32_t gpr_uint32;
typedef uint64_t gpr_uint64;
typedef intmax_t gpr_intmax;
typedef intptr_t gpr_intptr;
typedef uintmax_t gpr_uintmax;
typedef uintptr_t gpr_uintptr;
/* INT64_MAX is unavailable on some platforms. */
#define GPR_INT64_MAX (gpr_int64)(~(gpr_uint64)0 >> 1)
#define GPR_UINT32_MAX (~(gpr_uint32)0)
/* maximum alignment needed for any type on this platform, rounded up to a
power of two */
#define GPR_MAX_ALIGNMENT 16

@ -65,7 +65,7 @@ typedef struct gpr_slice_refcount {
void (*unref)(void *);
} gpr_slice_refcount;
#define GPR_SLICE_INLINED_SIZE (sizeof(size_t) + sizeof(gpr_uint8 *) - 1)
#define GPR_SLICE_INLINED_SIZE (sizeof(size_t) + sizeof(uint8_t *) - 1)
/* A gpr_slice s, if initialized, represents the byte range
s.bytes[0..s.length-1].
@ -80,12 +80,12 @@ typedef struct gpr_slice {
struct gpr_slice_refcount *refcount;
union {
struct {
gpr_uint8 *bytes;
uint8_t *bytes;
size_t length;
} refcounted;
struct {
gpr_uint8 length;
gpr_uint8 bytes[GPR_SLICE_INLINED_SIZE];
uint8_t length;
uint8_t bytes[GPR_SLICE_INLINED_SIZE];
} inlined;
} data;
} gpr_slice;
@ -98,7 +98,7 @@ typedef struct gpr_slice {
: (slice).data.inlined.length)
#define GPR_SLICE_SET_LENGTH(slice, newlen) \
((slice).refcount ? ((slice).data.refcounted.length = (size_t)(newlen)) \
: ((slice).data.inlined.length = (gpr_uint8)(newlen)))
: ((slice).data.inlined.length = (uint8_t)(newlen)))
#define GPR_SLICE_END_PTR(slice) \
GPR_SLICE_START_PTR(slice) + GPR_SLICE_LENGTH(slice)
#define GPR_SLICE_IS_EMPTY(slice) (GPR_SLICE_LENGTH(slice) == 0)
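
The gpr_slice union above keeps short payloads inline (up to GPR_SLICE_INLINED_SIZE bytes) and longer ones behind a refcount, with the accessor macros hiding the difference. A minimal sketch, assuming gpr_slice_from_copied_string and gpr_slice_unref from the same header's API (they are not shown in this hunk):

```c
/* Hedged sketch: both slices are read through the same macros regardless of
 * whether the bytes are inlined or refcounted. */
#include <stdio.h>
#include <grpc/support/slice.h>

void slice_demo(void) {
  gpr_slice small = gpr_slice_from_copied_string("hi");
  gpr_slice big = gpr_slice_from_copied_string("a payload too long to inline");
  printf("%zu bytes at %p, %zu bytes at %p\n",
         GPR_SLICE_LENGTH(small), (void *)GPR_SLICE_START_PTR(small),
         GPR_SLICE_LENGTH(big), (void *)GPR_SLICE_START_PTR(big));
  gpr_slice_unref(small);
  gpr_slice_unref(big);
}
```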

@ -77,7 +77,7 @@ size_t gpr_slice_buffer_add_indexed(gpr_slice_buffer *sb, gpr_slice slice);
void gpr_slice_buffer_addn(gpr_slice_buffer *sb, gpr_slice *slices, size_t n);
/* add a very small (less than 8 bytes) amount of data to the end of a slice
buffer: returns a pointer into which to add the data */
gpr_uint8 *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, size_t len);
uint8_t *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, size_t len);
/* pop the last buffer, but don't unref it */
void gpr_slice_buffer_pop(gpr_slice_buffer *sb);
/* clear a slice buffer, unref all elements */

@ -197,13 +197,13 @@ int gpr_unref(gpr_refcount *r);
synchronize other events. */
/* Initialize *c to the value n. */
void gpr_stats_init(gpr_stats_counter *c, gpr_intptr n);
void gpr_stats_init(gpr_stats_counter *c, intptr_t n);
/* *c += inc. Requires: *c initialized. */
void gpr_stats_inc(gpr_stats_counter *c, gpr_intptr inc);
void gpr_stats_inc(gpr_stats_counter *c, intptr_t inc);
/* Return *c. Requires: *c initialized. */
gpr_intptr gpr_stats_read(const gpr_stats_counter *c);
intptr_t gpr_stats_read(const gpr_stats_counter *c);
/* ==================Example use of interface===================
A producer-consumer queue of up to N integers,
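
The gpr_stats counters retyped above form a small atomic-statistics API (initialize, add, read). A minimal usage sketch built only from the three declarations shown; the item-counting scenario is illustrative:

```c
/* Hedged sketch: count processed items and read the total back. */
#include <grpc/support/log.h>
#include <grpc/support/sync.h>

static gpr_stats_counter g_items;

void stats_demo(void) {
  gpr_stats_init(&g_items, 0);
  gpr_stats_inc(&g_items, 5);
  gpr_stats_inc(&g_items, 2);
  GPR_ASSERT(gpr_stats_read(&g_items) == 7);
}
```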

@ -48,7 +48,7 @@
extern "C" {
#endif
typedef gpr_uint64 gpr_thd_id;
typedef uint64_t gpr_thd_id;
/* Thread creation options. */
typedef struct {

@ -61,8 +61,8 @@ typedef enum {
} gpr_clock_type;
typedef struct gpr_timespec {
gpr_int64 tv_sec;
gpr_int32 tv_nsec;
int64_t tv_sec;
int32_t tv_nsec;
/** Against which clock was this time measured? (or GPR_TIMESPAN if
this is a relative time meaure) */
gpr_clock_type clock_type;
@ -110,7 +110,7 @@ gpr_timespec gpr_time_from_seconds(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_minutes(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_hours(long x, gpr_clock_type clock_type);
gpr_int32 gpr_time_to_millis(gpr_timespec timespec);
int32_t gpr_time_to_millis(gpr_timespec timespec);
/* Return 1 if two times are equal or within threshold of each other,
0 otherwise */

@ -41,7 +41,7 @@
A minimal wrapper that should be implementable across many compilers,
and implementable efficiently across most modern compilers.
Thread locals have type gpr_intptr.
Thread locals have type intptr_t.
Declaring a thread local variable 'foo':
GPR_TLS_DECL(foo);

@ -38,7 +38,7 @@
#include tls.h to use this - and see that file for documentation */
struct gpr_gcc_thread_local {
gpr_intptr value;
intptr_t value;
};
#define GPR_TLS_DECL(name) \

@ -38,7 +38,7 @@
#include tls.h to use this - and see that file for documentation */
struct gpr_msvc_thread_local {
gpr_intptr value;
intptr_t value;
};
#define GPR_TLS_DECL(name) \

@ -48,11 +48,11 @@ struct gpr_pthread_thread_local {
#define gpr_tls_init(tls) GPR_ASSERT(0 == pthread_key_create(&(tls)->key, NULL))
#define gpr_tls_destroy(tls) pthread_key_delete((tls)->key)
#define gpr_tls_get(tls) ((gpr_intptr)pthread_getspecific((tls)->key))
#define gpr_tls_get(tls) ((intptr_t)pthread_getspecific((tls)->key))
#ifdef __cplusplus
extern "C" {
#endif
gpr_intptr gpr_tls_set(struct gpr_pthread_thread_local *tls, gpr_intptr value);
intptr_t gpr_tls_set(struct gpr_pthread_thread_local *tls, intptr_t value);
#ifdef __cplusplus
}
#endif
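
The TLS headers above (tls.h, tls_gcc.h, tls_msvc.h, tls_pthread.h) all switch the stored value from gpr_intptr to intptr_t. A minimal sketch of the macro family in use, combining GPR_TLS_DECL with the gpr_tls_init/set/get/destroy helpers visible in these hunks; the per-thread request-id use case is illustrative:

```c
/* Hedged sketch: stash a per-thread integer and read it back. */
#include <grpc/support/tls.h>

GPR_TLS_DECL(g_request_id);

void tls_demo(void) {
  gpr_tls_init(&g_request_id);
  gpr_tls_set(&g_request_id, (intptr_t)42); /* visible only to this thread */
  intptr_t id = gpr_tls_get(&g_request_id);
  (void)id;
  gpr_tls_destroy(&g_request_id);
}
```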

@ -1,3 +1,4 @@
# GRPC Python setup requirements
enum34>=1.0.4
futures>=2.2.0
cython>=0.23

@ -1,3 +1,5 @@
# Setup settings for GRPC Python
[coverage:run]
plugins = Cython.Coverage

@ -1,4 +1,4 @@
# Copyright 2015, Google Inc.
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@ -31,17 +31,28 @@
import os
import os.path
import shutil
import sys
from distutils import core as _core
from distutils import extension as _extension
import setuptools
from setuptools.command import egg_info
# Redirect the manifest template from MANIFEST.in to PYTHON-MANIFEST.in.
egg_info.manifest_maker.template = 'PYTHON-MANIFEST.in'
PYTHON_STEM = './src/python/grpcio/'
CORE_INCLUDE = ('./include', './',)
BORINGSSL_INCLUDE = ('./third_party/boringssl/include',)
# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, PYTHON_STEM)
# Break import-style to ensure we can actually find our commands module.
# Break import-style to ensure we can actually find our in-repo dependencies.
import commands
import grpc_core_dependencies
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
@ -59,44 +70,44 @@ INSTALL_TESTS = os.environ.get('GRPC_PYTHON_INSTALL_TESTS', False)
CYTHON_EXTENSION_PACKAGE_NAMES = ()
CYTHON_EXTENSION_MODULE_NAMES = (
'grpc._cython.cygrpc',
'grpc._cython._cygrpc.call',
'grpc._cython._cygrpc.channel',
'grpc._cython._cygrpc.completion_queue',
'grpc._cython._cygrpc.credentials',
'grpc._cython._cygrpc.records',
'grpc._cython._cygrpc.server',
)
CYTHON_EXTENSION_MODULE_NAMES = ('grpc._cython.cygrpc',)
EXTENSION_INCLUDE_DIRECTORIES = (
'.',
)
(PYTHON_STEM,) + CORE_INCLUDE + BORINGSSL_INCLUDE)
EXTENSION_LIBRARIES = (
'grpc',
'gpr',
)
EXTENSION_LIBRARIES = ()
if not "darwin" in sys.platform:
EXTENSION_LIBRARIES += ('rt',)
EXTRA_COMPILE_ARGS = ()
if not "win" in sys.platform:
EXTRA_COMPILE_ARGS = ('-pthread',)
DEFINE_MACROS = (('OPENSSL_NO_ASM', 1),)
def cython_extensions(package_names, module_names, include_dirs, libraries,
define_macros, extra_compile_args,
build_with_cython=False):
if ENABLE_CYTHON_TRACING:
define_macros = define_macros + [('CYTHON_TRACE_NOGIL', 1)]
file_extension = 'pyx' if build_with_cython else 'c'
module_files = [name.replace('.', '/') + '.' + file_extension
module_files = [os.path.join(PYTHON_STEM,
name.replace('.', '/') + '.' + file_extension)
for name in module_names]
extensions = [
_extension.Extension(
name=module_name, sources=[module_file],
name=module_name,
sources=[module_file] + grpc_core_dependencies.CORE_SOURCE_FILES,
include_dirs=include_dirs, libraries=libraries,
define_macros=[('CYTHON_TRACE_NOGIL', 1)] if ENABLE_CYTHON_TRACING else []
extra_compile_args=extra_compile_args,
define_macros=define_macros,
) for (module_name, module_file) in zip(module_names, module_files)
]
if build_with_cython:
import Cython.Build
return Cython.Build.cythonize(
extensions,
include_path=include_dirs,
compiler_directives={'linetrace': bool(ENABLE_CYTHON_TRACING)})
else:
return extensions
@ -104,10 +115,10 @@ def cython_extensions(package_names, module_names, include_dirs, libraries,
CYTHON_EXTENSION_MODULES = cython_extensions(
list(CYTHON_EXTENSION_PACKAGE_NAMES), list(CYTHON_EXTENSION_MODULE_NAMES),
list(EXTENSION_INCLUDE_DIRECTORIES), list(EXTENSION_LIBRARIES),
bool(BUILD_WITH_CYTHON))
list(DEFINE_MACROS), list(EXTRA_COMPILE_ARGS), bool(BUILD_WITH_CYTHON))
PACKAGE_DIRECTORIES = {
'': '.',
'': PYTHON_STEM,
}
INSTALL_REQUIRES = (
@ -128,6 +139,14 @@ COMMAND_CLASS = {
'run_interop': commands.RunInterop,
}
# Ensure that package data is copied over before any commands have been run:
credentials_dir = os.path.join(PYTHON_STEM, 'grpc/_adapter/credentials')
try:
os.mkdir(credentials_dir)
except OSError:
pass
shutil.copyfile('etc/roots.pem', os.path.join(credentials_dir, 'roots.pem'))
TEST_PACKAGE_DATA = {
'tests.interop': [
'credentials/ca.pem',
@ -142,6 +161,9 @@ TEST_PACKAGE_DATA = {
'credentials/server1.key',
'credentials/server1.pem',
],
'grpc._adapter': [
'credentials/roots.pem'
],
}
TESTS_REQUIRE = (
@ -157,16 +179,18 @@ TEST_RUNNER = 'tests:Runner'
PACKAGE_DATA = {}
if INSTALL_TESTS:
PACKAGE_DATA = dict(PACKAGE_DATA, **TEST_PACKAGE_DATA)
PACKAGES = setuptools.find_packages('.')
PACKAGES = setuptools.find_packages(PYTHON_STEM)
else:
PACKAGES = setuptools.find_packages('.', exclude=['tests', 'tests.*'])
PACKAGES = setuptools.find_packages(
PYTHON_STEM, exclude=['tests', 'tests.*'])
setuptools.setup(
name='grpcio',
version='0.12.0b0',
version='0.12.0b1',
ext_modules=CYTHON_EXTENSION_MODULES,
packages=list(PACKAGES),
package_dir=PACKAGE_DIRECTORIES,
package_data=PACKAGE_DATA,
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
cmdclass=COMMAND_CLASS,

@ -59,7 +59,7 @@ typedef struct call_data {
grpc_closure finish_recv;
} call_data;
typedef struct channel_data { gpr_uint8 unused; } channel_data;
typedef struct channel_data { uint8_t unused; } channel_data;
static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
call_data *calld,

@ -42,14 +42,14 @@ census_timestamp census_start_rpc_op_timestamp(void) {
}
census_context *census_start_client_rpc_op(
const census_context *context, gpr_int64 rpc_name_id,
const census_context *context, int64_t rpc_name_id,
const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
const census_timestamp *start_time) {
return NULL;
}
census_context *census_start_server_rpc_op(
const char *buffer, gpr_int64 rpc_name_id,
const char *buffer, int64_t rpc_name_id,
const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
census_timestamp *start_time) {
return NULL;

@ -36,16 +36,16 @@
/* Metric ID's used for RPC measurements. */
/* Count of client requests sent. */
#define CENSUS_METRIC_RPC_CLIENT_REQUESTS ((gpr_uint32)0)
#define CENSUS_METRIC_RPC_CLIENT_REQUESTS ((uint32_t)0)
/* Count of server requests sent. */
#define CENSUS_METRIC_RPC_SERVER_REQUESTS ((gpr_uint32)1)
#define CENSUS_METRIC_RPC_SERVER_REQUESTS ((uint32_t)1)
/* Client error counts. */
#define CENSUS_METRIC_RPC_CLIENT_ERRORS ((gpr_uint32)2)
#define CENSUS_METRIC_RPC_CLIENT_ERRORS ((uint32_t)2)
/* Server error counts. */
#define CENSUS_METRIC_RPC_SERVER_ERRORS ((gpr_uint32)3)
#define CENSUS_METRIC_RPC_SERVER_ERRORS ((uint32_t)3)
/* Client side request latency. */
#define CENSUS_METRIC_RPC_CLIENT_LATENCY ((gpr_uint32)4)
#define CENSUS_METRIC_RPC_CLIENT_LATENCY ((uint32_t)4)
/* Server side request latency. */
#define CENSUS_METRIC_RPC_SERVER_LATENCY ((gpr_uint32)5)
#define CENSUS_METRIC_RPC_SERVER_LATENCY ((uint32_t)5)
#endif /* CENSUS_RPC_METRIC_ID_H */

@ -41,5 +41,5 @@ int census_trace_mask(const census_context *context) {
void census_set_trace_mask(int trace_mask) {}
void census_trace_print(census_context *context, gpr_uint32 type,
void census_trace_print(census_context *context, uint32_t type,
const char *buffer, size_t n) {}

@ -137,7 +137,7 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
}
GPR_ASSERT(user_data > (char *)stack);
GPR_ASSERT((gpr_uintptr)(user_data - (char *)stack) ==
GPR_ASSERT((uintptr_t)(user_data - (char *)stack) ==
grpc_channel_stack_size(filters, filter_count));
stack->call_stack_size = call_size;

@ -353,10 +353,13 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
return 1;
}
if (chand->lb_policy != NULL) {
int r =
grpc_lb_policy_pick(exec_ctx, chand->lb_policy, calld->pollset,
initial_metadata, connected_subchannel, on_ready);
grpc_lb_policy *lb_policy = chand->lb_policy;
int r;
GRPC_LB_POLICY_REF(lb_policy, "cc_pick_subchannel");
gpr_mu_unlock(&chand->mu_config);
r = grpc_lb_policy_pick(exec_ctx, lb_policy, calld->pollset,
initial_metadata, connected_subchannel, on_ready);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "cc_pick_subchannel");
return r;
}
if (chand->resolver != NULL && !chand->started_resolving) {
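
The client_channel.c hunk above changes cc_pick_subchannel to take a ref on the LB policy, drop chand->mu_config, call grpc_lb_policy_pick, and only then release the ref, so the pick never runs under the channel lock. A generic sketch of that ref-then-unlock shape using the gpr_mu and gpr_refcount primitives from <grpc/support/sync.h>; the thing/do_work names are hypothetical stand-ins, not gRPC APIs:

```c
/* Hedged sketch: pin a shared object, release the lock, then do the
 * potentially re-entrant work. */
#include <grpc/support/sync.h>

typedef struct {
  gpr_refcount refs;
  int payload;
} thing;

static int do_work(thing *t) { return t->payload; } /* stand-in for the pick */

int call_without_lock(gpr_mu *mu, thing *shared) {
  thing *pinned;
  int r;
  gpr_mu_lock(mu);
  pinned = shared;        /* read the shared pointer under the lock */
  gpr_ref(&pinned->refs); /* keep it alive once the lock is gone */
  gpr_mu_unlock(mu);
  r = do_work(pinned);    /* may re-enter code that takes mu */
  if (gpr_unref(&pinned->refs)) {
    /* last reference: a real owner would free the object here */
  }
  return r;
}
```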

@ -51,7 +51,7 @@ typedef struct call_data {
gpr_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
gpr_uint32 remaining_slice_bytes;
uint32_t remaining_slice_bytes;
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm;
@ -59,8 +59,8 @@ typedef struct call_data {
int has_compression_algorithm;
grpc_transport_stream_op send_op;
gpr_uint32 send_length;
gpr_uint32 send_flags;
uint32_t send_length;
uint32_t send_flags;
gpr_slice incoming_slice;
grpc_slice_buffer_stream replacement_stream;
grpc_closure *post_send;
@ -74,7 +74,7 @@ typedef struct channel_data {
/** Compression options for the channel */
grpc_compression_options compression_options;
/** Supported compression algorithms */
gpr_uint32 supported_compression_algorithms;
uint32_t supported_compression_algorithms;
} channel_data;
/** For each \a md element from the incoming metadata, filter out the entry for
@ -262,7 +262,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_compression_options_init(&channeld->compression_options);
channeld->compression_options.enabled_algorithms_bitset =
(gpr_uint32)grpc_channel_args_compression_algorithm_get_states(
(uint32_t)grpc_channel_args_compression_algorithm_get_states(
args->channel_args);
channeld->default_compression_algorithm =

@ -40,12 +40,12 @@
#include "src/core/transport/static_metadata.h"
typedef struct call_data {
gpr_uint8 seen_path;
gpr_uint8 seen_post;
gpr_uint8 sent_status;
gpr_uint8 seen_scheme;
gpr_uint8 seen_te_trailers;
gpr_uint8 seen_authority;
uint8_t seen_path;
uint8_t seen_post;
uint8_t sent_status;
uint8_t seen_scheme;
uint8_t seen_te_trailers;
uint8_t seen_authority;
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
@ -58,7 +58,7 @@ typedef struct call_data {
grpc_closure hs_on_recv;
} call_data;
typedef struct channel_data { gpr_uint8 unused; } channel_data;
typedef struct channel_data { uint8_t unused; } channel_data;
typedef struct {
grpc_call_element *elem;

@ -155,7 +155,7 @@ retry:
holder->connected_subchannel != NULL) {
gpr_atm_rel_store(
&holder->subchannel_call,
(gpr_atm)(gpr_uintptr)grpc_connected_subchannel_create_call(
(gpr_atm)(uintptr_t)grpc_connected_subchannel_create_call(
exec_ctx, holder->connected_subchannel, holder->pollset));
retry_waiting_locked(exec_ctx, holder);
goto retry;
@ -180,7 +180,7 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, int success) {
} else {
gpr_atm_rel_store(
&holder->subchannel_call,
(gpr_atm)(gpr_uintptr)grpc_connected_subchannel_create_call(
(gpr_atm)(uintptr_t)grpc_connected_subchannel_create_call(
exec_ctx, holder->connected_subchannel, holder->pollset));
retry_waiting_locked(exec_ctx, holder);
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -55,12 +55,11 @@ typedef struct {
grpc_closure connectivity_changed;
/** the selected channel (a grpc_connected_subchannel) */
gpr_atm selected;
/** mutex protecting remaining members */
gpr_mu mu;
/** the selected channel
TODO(ctiller): this should be atomically set so we don't
need to take a mutex in the common case */
grpc_connected_subchannel *selected;
/** have we started picking? */
int started_picking;
/** are we shut down? */
@ -76,15 +75,19 @@ typedef struct {
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
#define GET_SELECTED(p) \
((grpc_connected_subchannel *)gpr_atm_no_barrier_load(&(p)->selected))
void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
grpc_connected_subchannel *selected = GET_SELECTED(p);
size_t i;
GPR_ASSERT(p->pending_picks == NULL);
for (i = 0; i < p->num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first");
}
if (p->selected) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected, "picked_first");
if (selected != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, selected, "picked_first");
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
@ -95,16 +98,18 @@ void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
grpc_connected_subchannel *selected;
gpr_mu_lock(&p->mu);
selected = GET_SELECTED(p);
p->shutdown = 1;
pp = p->pending_picks;
p->pending_picks = NULL;
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
/* cancel subscription */
if (p->selected != NULL) {
if (selected != NULL) {
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
exec_ctx, selected, NULL, NULL, &p->connectivity_changed);
} else {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
@ -171,10 +176,20 @@ int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
grpc_connected_subchannel **target, grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
/* Check atomically for a selected channel */
grpc_connected_subchannel *selected = GET_SELECTED(p);
if (selected != NULL) {
*target = selected;
return 1;
}
/* No subchannel selected yet, so acquire lock and then attempt again */
gpr_mu_lock(&p->mu);
if (p->selected) {
selected = GET_SELECTED(p);
if (selected) {
gpr_mu_unlock(&p->mu);
*target = p->selected;
*target = selected;
return 1;
} else {
if (!p->started_picking) {
@ -219,14 +234,17 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
pick_first_lb_policy *p = arg;
grpc_subchannel *selected_subchannel;
pending_pick *pp;
grpc_connected_subchannel *selected;
gpr_mu_lock(&p->mu);
selected = GET_SELECTED(p);
if (p->shutdown) {
gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
return;
} else if (p->selected != NULL) {
} else if (selected != NULL) {
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* if the selected channel goes bad, we're done */
p->checking_connectivity = GRPC_CHANNEL_FATAL_FAILURE;
@ -235,7 +253,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
p->checking_connectivity, "selected_changed");
if (p->checking_connectivity != GRPC_CHANNEL_FATAL_FAILURE) {
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, &p->base.interested_parties,
exec_ctx, selected, &p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
} else {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
@ -247,10 +265,11 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, "connecting_ready");
selected_subchannel = p->subchannels[p->checking_subchannel];
p->selected =
selected =
grpc_subchannel_get_connected_subchannel(selected_subchannel);
GPR_ASSERT(p->selected);
GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked_first");
GPR_ASSERT(selected != NULL);
gpr_atm_no_barrier_store(&p->selected, (gpr_atm)selected);
GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked_first");
/* drop the pick list: we are connected now */
GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
grpc_exec_ctx_enqueue(exec_ctx,
@ -258,14 +277,14 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = p->selected;
*pp->target = selected;
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, &p->base.interested_parties,
exec_ctx, selected, &p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
@ -351,13 +370,12 @@ void pf_notify_on_state_change(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
gpr_mu_lock(&p->mu);
if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
grpc_connected_subchannel *selected = GET_SELECTED(p);
if (selected) {
grpc_connected_subchannel_ping(exec_ctx, selected, closure);
} else {
grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
}
gpr_mu_unlock(&p->mu);
}
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
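
The pick_first.c changes above replace the mutex-guarded p->selected pointer with a gpr_atm that GET_SELECTED reads without the lock, so pf_pick, pf_ping_one, and the shutdown path only fall back to p->mu when nothing has been selected yet. A generic sketch of that lock-free fast path with a locked re-check, using the same gpr_atm/gpr_mu primitives; the picker struct and callback are hypothetical:

```c
/* Hedged sketch: check the atomic pointer first; only the slow path locks. */
#include <stddef.h>
#include <grpc/support/atm.h>
#include <grpc/support/sync.h>

typedef struct {
  gpr_atm selected; /* holds a pointer value, 0 while unset */
  gpr_mu mu;
} picker;

void *pick_fast_or_slow(picker *p, void *(*pick_slow_locked)(picker *)) {
  void *sel = (void *)gpr_atm_no_barrier_load(&p->selected);
  if (sel != NULL) return sel; /* common case: no mutex taken */
  gpr_mu_lock(&p->mu);
  sel = (void *)gpr_atm_no_barrier_load(&p->selected); /* re-check under lock */
  if (sel == NULL) sel = pick_slow_locked(p); /* e.g. queue a pending pick */
  gpr_mu_unlock(&p->mu);
  return sel;
}
```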

@ -230,7 +230,7 @@ static int parse_ipv4(grpc_uri *uri, struct sockaddr_storage *addr,
gpr_log(GPR_ERROR, "invalid ipv4 port: '%s'", port);
goto done;
}
in->sin_port = htons((gpr_uint16)port_num);
in->sin_port = htons((uint16_t)port_num);
} else {
gpr_log(GPR_ERROR, "no port given for ipv4 scheme");
goto done;
@ -271,7 +271,7 @@ static int parse_ipv6(grpc_uri *uri, struct sockaddr_storage *addr,
gpr_log(GPR_ERROR, "invalid ipv6 port: '%s'", port);
goto done;
}
in6->sin6_port = htons((gpr_uint16)port_num);
in6->sin6_port = htons((uint16_t)port_num);
} else {
gpr_log(GPR_ERROR, "no port given for ipv6 scheme");
goto done;

@ -131,7 +131,7 @@ struct grpc_subchannel {
/** our alarm */
grpc_timer alarm;
/** current random value */
gpr_uint32 random;
uint32_t random;
};
struct grpc_subchannel_call {
@ -272,8 +272,8 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
}
}
static gpr_uint32 random_seed() {
return (gpr_uint32)(gpr_time_to_millis(gpr_now(GPR_CLOCK_MONOTONIC)));
static uint32_t random_seed() {
return (uint32_t)(gpr_time_to_millis(gpr_now(GPR_CLOCK_MONOTONIC)));
}
grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
@ -541,15 +541,15 @@ static void publish_transport(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
/* Generate a random number between 0 and 1. */
static double generate_uniform_random_number(grpc_subchannel *c) {
c->random = (1103515245 * c->random + 12345) % ((gpr_uint32)1 << 31);
return c->random / (double)((gpr_uint32)1 << 31);
c->random = (1103515245 * c->random + 12345) % ((uint32_t)1 << 31);
return c->random / (double)((uint32_t)1 << 31);
}
/* Update backoff_delta and next_attempt in subchannel */
static void update_reconnect_parameters(grpc_subchannel *c) {
size_t i;
gpr_int32 backoff_delta_millis, jitter;
gpr_int32 max_backoff_millis =
int32_t backoff_delta_millis, jitter;
int32_t max_backoff_millis =
GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS * 1000;
double jitter_range;
@ -567,8 +567,8 @@ static void update_reconnect_parameters(grpc_subchannel *c) {
}
backoff_delta_millis =
(gpr_int32)(gpr_time_to_millis(c->backoff_delta) *
GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER);
(int32_t)(gpr_time_to_millis(c->backoff_delta) *
GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER);
if (backoff_delta_millis > max_backoff_millis) {
backoff_delta_millis = max_backoff_millis;
}
@ -578,7 +578,7 @@ static void update_reconnect_parameters(grpc_subchannel *c) {
jitter_range = GRPC_SUBCHANNEL_RECONNECT_JITTER * backoff_delta_millis;
jitter =
(gpr_int32)((2 * generate_uniform_random_number(c) - 1) * jitter_range);
(int32_t)((2 * generate_uniform_random_number(c) - 1) * jitter_range);
c->next_attempt =
gpr_time_add(c->next_attempt, gpr_time_from_millis(jitter, GPR_TIMESPAN));
}
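
A worked example of the jittered-backoff arithmetic in update_reconnect_parameters above. The multiplier, jitter fraction, and cap used here (1.6, 0.2, 120 s) are assumed values standing in for the GRPC_SUBCHANNEL_RECONNECT_* constants, which are defined outside this excerpt, and the random draw is fixed for illustration:

```c
/* Hedged worked example: one backoff step with deterministic "randomness". */
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int32_t backoff_delta_millis = (int32_t)(5000 * 1.6); /* 5 s -> 8000 ms */
  int32_t max_backoff_millis = 120 * 1000;              /* assumed cap */
  if (backoff_delta_millis > max_backoff_millis) {
    backoff_delta_millis = max_backoff_millis;
  }
  double jitter_range = 0.2 * backoff_delta_millis;       /* +/- 1600 ms */
  double r = 0.75;                                        /* fixed stand-in for the LCG */
  int32_t jitter = (int32_t)((2 * r - 1) * jitter_range); /* +800 ms */
  printf("next attempt in %d ms\n", backoff_delta_millis + jitter); /* 8800 */
  return 0;
}
```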

@ -93,7 +93,7 @@ gpr_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
gpr_strvec_add(&out, gpr_strdup("POST "));
fill_common_header(request, &out);
if (body_bytes) {
gpr_uint8 has_content_type = 0;
uint8_t has_content_type = 0;
for (i = 0; i < request->hdr_count; i++) {
if (strcmp(request->hdrs[i].key, "Content-Type") == 0) {
has_content_type = 1;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -84,8 +84,7 @@ static void httpcli_ssl_do_handshake(grpc_exec_ctx *exec_ctx,
}
static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc,
tsi_peer peer,
grpc_security_connector *sc, tsi_peer peer,
grpc_security_peer_check_cb cb,
void *user_data) {
grpc_httpcli_ssl_channel_security_connector *c =

@ -40,9 +40,9 @@
#include <grpc/support/useful.h>
static int handle_response_line(grpc_httpcli_parser *parser) {
gpr_uint8 *beg = parser->cur_line;
gpr_uint8 *cur = beg;
gpr_uint8 *end = beg + parser->cur_line_length;
uint8_t *beg = parser->cur_line;
uint8_t *cur = beg;
uint8_t *end = beg + parser->cur_line_length;
if (cur == end || *cur++ != 'H') goto error;
if (cur == end || *cur++ != 'T') goto error;
@ -77,9 +77,9 @@ static char *buf2str(void *buffer, size_t length) {
}
static int add_header(grpc_httpcli_parser *parser) {
gpr_uint8 *beg = parser->cur_line;
gpr_uint8 *cur = beg;
gpr_uint8 *end = beg + parser->cur_line_length;
uint8_t *beg = parser->cur_line;
uint8_t *cur = beg;
uint8_t *end = beg + parser->cur_line_length;
grpc_httpcli_header hdr = {NULL, NULL};
GPR_ASSERT(cur != end);
@ -146,7 +146,7 @@ static int finish_line(grpc_httpcli_parser *parser) {
return 1;
}
static int addbyte(grpc_httpcli_parser *parser, gpr_uint8 byte) {
static int addbyte(grpc_httpcli_parser *parser, uint8_t byte) {
switch (parser->state) {
case GRPC_HTTPCLI_INITIAL_RESPONSE:
case GRPC_HTTPCLI_HEADERS:

@ -51,7 +51,7 @@ typedef struct {
size_t body_capacity;
size_t hdr_capacity;
gpr_uint8 cur_line[GRPC_HTTPCLI_MAX_HEADER_LENGTH];
uint8_t cur_line[GRPC_HTTPCLI_MAX_HEADER_LENGTH];
size_t cur_line_length;
} grpc_httpcli_parser;

@ -49,7 +49,7 @@ void grpc_closure_list_add(grpc_closure_list *closure_list,
if (closure_list->head == NULL) {
closure_list->head = closure;
} else {
closure_list->tail->final_data |= (gpr_uintptr)closure;
closure_list->tail->final_data |= (uintptr_t)closure;
}
closure_list->tail = closure;
}
@ -65,7 +65,7 @@ void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst) {
if (dst->head == NULL) {
*dst = *src;
} else {
dst->tail->final_data |= (gpr_uintptr)src->head;
dst->tail->final_data |= (uintptr_t)src->head;
dst->tail = src->tail;
}
src->head = src->tail = NULL;
@ -94,5 +94,5 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
}
grpc_closure *grpc_closure_next(grpc_closure *closure) {
return (grpc_closure *)(closure->final_data & ~(gpr_uintptr)1);
return (grpc_closure *)(closure->final_data & ~(uintptr_t)1);
}

@ -67,7 +67,7 @@ struct grpc_closure {
/** Once enqueued, contains in the lower bit the success of the closure,
and in the upper bits the pointer to the next closure in the list.
Before enqueing for execution, this is usable for scratch data. */
gpr_uintptr final_data;
uintptr_t final_data;
};
/** Initializes \a closure with \a cb and \a cb_arg. */
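
The closure.h comment above describes a pointer-tagging scheme: final_data carries the success flag in bit 0 and the next-closure pointer in the remaining bits, which is exactly what the |= (uintptr_t) stores and the & ~(uintptr_t)1 masks in closure.c and exec_ctx.c implement. A minimal standalone sketch of that tag/untag arithmetic:

```c
/* Hedged sketch: pack a success bit into the low bit of an aligned pointer. */
#include <stdint.h>

struct node; /* stands in for grpc_closure, which is at least 2-byte aligned */

static uintptr_t pack(struct node *next, int success) {
  return (uintptr_t)next | (uintptr_t)(success & 1);
}
static struct node *unpack_next(uintptr_t final_data) {
  return (struct node *)(final_data & ~(uintptr_t)1);
}
static int unpack_success(uintptr_t final_data) {
  return (int)(final_data & 1);
}
```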

@ -45,7 +45,7 @@ int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
int success = (int)(c->final_data & 1);
grpc_closure *next = (grpc_closure *)(c->final_data & ~(gpr_uintptr)1);
grpc_closure *next = (grpc_closure *)(c->final_data & ~(uintptr_t)1);
did_something++;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
c->cb(exec_ctx, c->cb_arg, success);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -101,6 +101,7 @@ static grpc_fd *alloc_fd(int fd) {
r->read_watcher = r->write_watcher = NULL;
r->on_done_closure = NULL;
r->closed = 0;
r->released = 0;
return r;
}
@ -210,6 +211,24 @@ static int has_watchers(grpc_fd *fd) {
fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
}
static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
fd->closed = 1;
if (!fd->released) {
close(fd->fd);
} else {
grpc_remove_fd_from_all_epoll_sets(fd->fd);
}
grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
}
int grpc_fd_wrapped_fd(grpc_fd *fd) {
if (fd->released || fd->closed) {
return -1;
} else {
return fd->fd;
}
}
void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
int *release_fd, const char *reason) {
fd->on_done_closure = on_done;
@ -222,11 +241,7 @@ void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
gpr_mu_lock(&fd->mu);
REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
if (!has_watchers(fd)) {
fd->closed = 1;
if (!fd->released) {
close(fd->fd);
}
grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
close_fd_locked(exec_ctx, fd);
} else {
wake_all_watchers_locked(fd);
}
@ -318,10 +333,10 @@ void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
gpr_mu_unlock(&fd->mu);
}
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
grpc_pollset_worker *worker, gpr_uint32 read_mask,
gpr_uint32 write_mask, grpc_fd_watcher *watcher) {
gpr_uint32 mask = 0;
uint32_t grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
grpc_pollset_worker *worker, uint32_t read_mask,
uint32_t write_mask, grpc_fd_watcher *watcher) {
uint32_t mask = 0;
grpc_closure *cur;
int requested;
/* keep track of pollers that have requested our events, in case they change
@ -416,11 +431,7 @@ void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
maybe_wake_one_watcher_locked(fd);
}
if (grpc_fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
fd->closed = 1;
if (!fd->released) {
close(fd->fd);
}
grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
close_fd_locked(exec_ctx, fd);
}
gpr_mu_unlock(&fd->mu);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -105,6 +105,9 @@ struct grpc_fd {
This takes ownership of closing fd. */
grpc_fd *grpc_fd_create(int fd, const char *name);
/* Return the wrapped fd, or -1 if it has been released or closed. */
int grpc_fd_wrapped_fd(grpc_fd *fd);
/* Releases fd to be asynchronously destroyed.
on_done is called when the underlying file descriptor is definitely close()d.
If on_done is NULL, no callback will be made.
@ -126,9 +129,9 @@ void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
Polling strategies that do not need to alter their behavior depending on the
fd's current interest (such as epoll) do not need to call this function.
MUST NOT be called with a pollset lock taken */
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
grpc_pollset_worker *worker, gpr_uint32 read_mask,
gpr_uint32 write_mask, grpc_fd_watcher *rec);
uint32_t grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
grpc_pollset_worker *worker, uint32_t read_mask,
uint32_t write_mask, grpc_fd_watcher *rec);
/* Complete polling previously started with grpc_fd_begin_poll
MUST NOT be called with a pollset lock taken
if got_read or got_write are 1, also does the become_{readable,writable} as

@ -160,7 +160,7 @@ void grpc_iocp_add_socket(grpc_winsocket *socket) {
HANDLE ret;
if (socket->added_to_iocp) return;
ret = CreateIoCompletionPort((HANDLE)socket->socket, g_iocp,
(gpr_uintptr)socket, 0);
(uintptr_t)socket, 0);
if (!ret) {
char *utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "Unable to add socket to iocp: %s", utf8_message);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -43,9 +43,66 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/support/block_annotate.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/block_annotate.h"
struct epoll_fd_list {
int *epoll_fds;
size_t count;
size_t capacity;
};
static struct epoll_fd_list epoll_fd_global_list;
static gpr_once init_epoll_fd_list_mu = GPR_ONCE_INIT;
static gpr_mu epoll_fd_list_mu;
static void init_mu(void) { gpr_mu_init(&epoll_fd_list_mu); }
static void add_epoll_fd_to_global_list(int epoll_fd) {
gpr_once_init(&init_epoll_fd_list_mu, init_mu);
gpr_mu_lock(&epoll_fd_list_mu);
if (epoll_fd_global_list.count == epoll_fd_global_list.capacity) {
epoll_fd_global_list.capacity =
GPR_MAX((size_t)8, epoll_fd_global_list.capacity * 2);
epoll_fd_global_list.epoll_fds =
gpr_realloc(epoll_fd_global_list.epoll_fds,
epoll_fd_global_list.capacity * sizeof(int));
}
epoll_fd_global_list.epoll_fds[epoll_fd_global_list.count++] = epoll_fd;
gpr_mu_unlock(&epoll_fd_list_mu);
}
static void remove_epoll_fd_from_global_list(int epoll_fd) {
gpr_mu_lock(&epoll_fd_list_mu);
GPR_ASSERT(epoll_fd_global_list.count > 0);
for (size_t i = 0; i < epoll_fd_global_list.count; i++) {
if (epoll_fd == epoll_fd_global_list.epoll_fds[i]) {
epoll_fd_global_list.epoll_fds[i] =
epoll_fd_global_list.epoll_fds[--(epoll_fd_global_list.count)];
break;
}
}
gpr_mu_unlock(&epoll_fd_list_mu);
}
void grpc_remove_fd_from_all_epoll_sets(int fd) {
int err;
gpr_mu_lock(&epoll_fd_list_mu);
if (epoll_fd_global_list.count == 0) {
gpr_mu_unlock(&epoll_fd_list_mu);
return;
}
for (size_t i = 0; i < epoll_fd_global_list.count; i++) {
err = epoll_ctl(epoll_fd_global_list.epoll_fds[i], EPOLL_CTL_DEL, fd, NULL);
if (err < 0 && errno != ENOENT) {
gpr_log(GPR_ERROR, "epoll_ctl del for %d failed: %s", fd,
strerror(errno));
}
}
gpr_mu_unlock(&epoll_fd_list_mu);
}
typedef struct {
grpc_pollset *pollset;
@ -211,6 +268,7 @@ static void multipoll_with_epoll_pollset_finish_shutdown(
static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) {
pollset_hdr *h = pollset->data.ptr;
close(h->epoll_fd);
remove_epoll_fd_from_global_list(h->epoll_fd);
gpr_free(h);
}
@ -236,6 +294,7 @@ static void epoll_become_multipoller(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_ERROR, "epoll_create1 failed: %s", strerror(errno));
abort();
}
add_epoll_fd_to_global_list(h->epoll_fd);
ev.events = (uint32_t)(EPOLLIN | EPOLLET);
ev.data.ptr = NULL;
@ -255,4 +314,8 @@ static void epoll_become_multipoller(grpc_exec_ctx *exec_ctx,
grpc_platform_become_multipoller_type grpc_platform_become_multipoller =
epoll_become_multipoller;
#else /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */
void grpc_remove_fd_from_all_epoll_sets(int fd) {}
#endif /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */

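Aside: grpc_remove_fd_from_all_epoll_sets above walks a global registry of epoll fds and issues EPOLL_CTL_DEL for a released descriptor, tolerating ENOENT. A minimal Linux-only sketch of that deregistration call, independent of the gRPC polling machinery:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void) {
  int fds[2];
  int epfd = epoll_create1(0);
  if (epfd < 0 || pipe(fds) < 0) return 1;

  struct epoll_event ev;
  ev.events = EPOLLIN;
  ev.data.fd = fds[0];
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fds[0], &ev) < 0) return 1;

  /* Deregister before handing the raw fd to other code; ENOENT just means
     this particular epoll set never tracked the fd. */
  if (epoll_ctl(epfd, EPOLL_CTL_DEL, fds[0], NULL) < 0 && errno != ENOENT) {
    fprintf(stderr, "epoll_ctl del failed: %s\n", strerror(errno));
  }

  close(fds[0]);
  close(fds[1]);
  close(epfd);
  return 0;
}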
@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -144,7 +144,9 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
GRPC_SCHEDULING_END_BLOCKING_REGION;
if (r < 0) {
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
if (errno != EINTR) {
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
}
for (i = 2; i < pfd_count; i++) {
grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
}

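Aside: the hunk above stops logging poll() failures caused by signal interruption. A small standalone illustration of the same errno check, using plain poll(2) rather than the gRPC poller:

#include <errno.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  struct pollfd pfd;
  pfd.fd = 0; /* stdin, just for illustration */
  pfd.events = POLLIN;

  int r = poll(&pfd, 1, 10 /* ms */);
  if (r < 0) {
    /* A signal interrupting the wait is routine, so only genuine failures
       are reported -- the same distinction the hunk above introduces. */
    if (errno != EINTR) {
      fprintf(stderr, "poll() failed: %s\n", strerror(errno));
    }
  } else if (r == 0) {
    printf("timed out\n");
  } else {
    printf("fd ready\n");
  }
  return 0;
}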
@ -100,7 +100,7 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
void grpc_pollset_kick_ext(grpc_pollset *p,
grpc_pollset_worker *specific_worker,
gpr_uint32 flags) {
uint32_t flags) {
GPR_TIMER_BEGIN("grpc_pollset_kick_ext", 0);
/* pollset->mu already held */
@ -116,7 +116,7 @@ void grpc_pollset_kick_ext(grpc_pollset *p,
p->kicked_without_pollers = 1;
GPR_TIMER_END("grpc_pollset_kick_ext.broadcast", 0);
} else if (gpr_tls_get(&g_current_thread_worker) !=
(gpr_intptr)specific_worker) {
(intptr_t)specific_worker) {
GPR_TIMER_MARK("different_thread_worker", 0);
if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
specific_worker->reevaluate_polling_on_wakeup = 1;
@ -131,19 +131,18 @@ void grpc_pollset_kick_ext(grpc_pollset *p,
specific_worker->kicked_specifically = 1;
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
}
} else if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p) {
} else if (gpr_tls_get(&g_current_thread_poller) != (intptr_t)p) {
GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
GPR_TIMER_MARK("kick_anonymous", 0);
specific_worker = pop_front_worker(p);
if (specific_worker != NULL) {
if (gpr_tls_get(&g_current_thread_worker) ==
(gpr_intptr)specific_worker) {
if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
GPR_TIMER_MARK("kick_anonymous_not_self", 0);
push_back_worker(p, specific_worker);
specific_worker = pop_front_worker(p);
if ((flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 &&
gpr_tls_get(&g_current_thread_worker) ==
(gpr_intptr)specific_worker) {
(intptr_t)specific_worker) {
push_back_worker(p, specific_worker);
specific_worker = NULL;
}
@ -307,9 +306,9 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (!added_worker) {
push_front_worker(pollset, worker);
added_worker = 1;
gpr_tls_set(&g_current_thread_worker, (gpr_intptr)worker);
gpr_tls_set(&g_current_thread_worker, (intptr_t)worker);
}
gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
gpr_tls_set(&g_current_thread_poller, (intptr_t)pollset);
GPR_TIMER_BEGIN("maybe_work_and_unlock", 0);
pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, worker,
deadline, now);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -122,7 +122,7 @@ int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
-- mostly for fd_posix's use. */
void grpc_pollset_kick_ext(grpc_pollset *p,
grpc_pollset_worker *specific_worker,
gpr_uint32 flags);
uint32_t flags);
/* turn a pollset into a multipoller: platform specific */
typedef void (*grpc_platform_become_multipoller_type)(grpc_exec_ctx *exec_ctx,
@ -139,6 +139,8 @@ void grpc_poll_become_multipoller(grpc_exec_ctx *exec_ctx,
* be locked) */
int grpc_pollset_has_workers(grpc_pollset *pollset);
void grpc_remove_fd_from_all_epoll_sets(int fd);
/* override to allow tests to hook poll() usage */
typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int);
extern grpc_poll_function_type grpc_poll_function;

@ -48,8 +48,8 @@
#include "src/core/support/string.h"
static const gpr_uint8 kV4MappedPrefix[] = {0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0xff, 0xff};
static const uint8_t kV4MappedPrefix[] = {0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0xff, 0xff};
int grpc_sockaddr_is_v4mapped(const struct sockaddr *addr,
struct sockaddr_in *addr4_out) {
@ -126,14 +126,14 @@ void grpc_sockaddr_make_wildcard4(int port, struct sockaddr_in *wild_out) {
GPR_ASSERT(port >= 0 && port < 65536);
memset(wild_out, 0, sizeof(*wild_out));
wild_out->sin_family = AF_INET;
wild_out->sin_port = htons((gpr_uint16)port);
wild_out->sin_port = htons((uint16_t)port);
}
void grpc_sockaddr_make_wildcard6(int port, struct sockaddr_in6 *wild_out) {
GPR_ASSERT(port >= 0 && port < 65536);
memset(wild_out, 0, sizeof(*wild_out));
wild_out->sin6_family = AF_INET6;
wild_out->sin6_port = htons((gpr_uint16)port);
wild_out->sin6_port = htons((uint16_t)port);
}
int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
@ -220,11 +220,11 @@ int grpc_sockaddr_set_port(const struct sockaddr *addr, int port) {
switch (addr->sa_family) {
case AF_INET:
GPR_ASSERT(port >= 0 && port < 65536);
((struct sockaddr_in *)addr)->sin_port = htons((gpr_uint16)port);
((struct sockaddr_in *)addr)->sin_port = htons((uint16_t)port);
return 1;
case AF_INET6:
GPR_ASSERT(port >= 0 && port < 65536);
((struct sockaddr_in6 *)addr)->sin6_port = htons((gpr_uint16)port);
((struct sockaddr_in6 *)addr)->sin6_port = htons((uint16_t)port);
return 1;
default:
gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port",

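Aside: the sockaddr helpers above now cast the range-checked port to uint16_t before htons. A minimal sketch of the same pattern on a plain sockaddr_in; the port number is an arbitrary example:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  struct sockaddr_in addr;
  int port = 50051; /* arbitrary example port */

  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  /* Ports are 16-bit: validate the int, then cast before the
     host-to-network byte-order conversion. */
  if (port < 0 || port >= 65536) return 1;
  addr.sin_port = htons((uint16_t)port);

  printf("round-tripped port: %u\n", (unsigned)ntohs(addr.sin_port));
  return 0;
}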
@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -473,6 +473,12 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
return &tcp->base;
}
int grpc_tcp_fd(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
GPR_ASSERT(ep->vtable == &vtable);
return grpc_fd_wrapped_fd(tcp->em_fd);
}
void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int *fd, grpc_closure *done) {
grpc_tcp *tcp = (grpc_tcp *)ep;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -56,6 +56,12 @@ extern int grpc_tcp_trace;
grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size,
const char *peer_string);
/* Return the tcp endpoint's fd, or -1 if this is not available. Does not
release the fd.
Requires: ep must be a tcp endpoint.
*/
int grpc_tcp_fd(grpc_endpoint *ep);
/* Destroy the tcp endpoint without closing its fd. *fd will be set and done
* will be called when the endpoint is destroyed.
* Requires: ep must be a tcp endpoint and fd must not be NULL. */

@ -78,7 +78,7 @@ struct grpc_tcp_listener {
grpc_fd *emfd;
grpc_tcp_server *server;
union {
gpr_uint8 untyped[GRPC_MAX_SOCKADDR_SIZE];
uint8_t untyped[GRPC_MAX_SOCKADDR_SIZE];
struct sockaddr sockaddr;
struct sockaddr_un un;
} addr;

@ -58,7 +58,7 @@
struct grpc_tcp_listener {
/* This seemingly magic number comes from AcceptEx's documentation: each
address buffer needs to have at least 16 more bytes at its end. */
gpr_uint8 addresses[(sizeof(struct sockaddr_in6) + 16) * 2];
uint8_t addresses[(sizeof(struct sockaddr_in6) + 16) * 2];
/* This will hold the socket for the next accept. */
SOCKET new_socket;
/* The listener winsocket. */

@ -55,7 +55,7 @@ typedef struct {
gpr_timespec queue_deadline_cap;
gpr_timespec min_deadline;
/* Index in the g_shard_queue */
gpr_uint32 shard_queue_index;
uint32_t shard_queue_index;
/* This holds all timers with deadlines < queue_deadline_cap. Timers in this
list have the top bit of their deadline set to 0. */
grpc_timer_heap heap;
@ -82,7 +82,7 @@ static gpr_timespec compute_min_deadline(shard_type *shard) {
}
void grpc_timer_list_init(gpr_timespec now) {
gpr_uint32 i;
uint32_t i;
gpr_mu_init(&g_mu);
gpr_mu_init(&g_checker_mu);
@ -126,8 +126,8 @@ static double ts_to_dbl(gpr_timespec ts) {
static gpr_timespec dbl_to_ts(double d) {
gpr_timespec ts;
ts.tv_sec = (gpr_int64)d;
ts.tv_nsec = (gpr_int32)(1e9 * (d - (double)ts.tv_sec));
ts.tv_sec = (int64_t)d;
ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec));
ts.clock_type = GPR_TIMESPAN;
return ts;
}
@ -143,7 +143,7 @@ static void list_remove(grpc_timer *timer) {
timer->prev->next = timer->next;
}
static void swap_adjacent_shards_in_queue(gpr_uint32 first_shard_queue_index) {
static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
shard_type *temp;
temp = g_shard_queue[first_shard_queue_index];
g_shard_queue[first_shard_queue_index] =

@ -41,7 +41,7 @@
typedef struct grpc_timer {
gpr_timespec deadline;
gpr_uint32 heap_index; /* INVALID_HEAP_INDEX if not in heap */
uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
int triggered;
struct grpc_timer *next;
struct grpc_timer *prev;

@ -43,9 +43,9 @@
position. This functor is called each time immediately after modifying a
value in the underlying container, with the offset of the modified element as
its argument. */
static void adjust_upwards(grpc_timer **first, gpr_uint32 i, grpc_timer *t) {
static void adjust_upwards(grpc_timer **first, uint32_t i, grpc_timer *t) {
while (i > 0) {
gpr_uint32 parent = (gpr_uint32)(((int)i - 1) / 2);
uint32_t parent = (uint32_t)(((int)i - 1) / 2);
if (gpr_time_cmp(first[parent]->deadline, t->deadline) >= 0) break;
first[i] = first[parent];
first[i]->heap_index = i;
@ -58,12 +58,12 @@ static void adjust_upwards(grpc_timer **first, gpr_uint32 i, grpc_timer *t) {
/* Adjusts a heap so as to move a hole at position i farther away from the root,
until a suitable position is found for element t. Then, copies t into that
position. */
static void adjust_downwards(grpc_timer **first, gpr_uint32 i,
gpr_uint32 length, grpc_timer *t) {
static void adjust_downwards(grpc_timer **first, uint32_t i, uint32_t length,
grpc_timer *t) {
for (;;) {
gpr_uint32 left_child = 1u + 2u * i;
gpr_uint32 right_child;
gpr_uint32 next_i;
uint32_t left_child = 1u + 2u * i;
uint32_t right_child;
uint32_t next_i;
if (left_child >= length) break;
right_child = left_child + 1;
next_i = right_child < length &&
@ -93,8 +93,8 @@ static void maybe_shrink(grpc_timer_heap *heap) {
}
static void note_changed_priority(grpc_timer_heap *heap, grpc_timer *timer) {
gpr_uint32 i = timer->heap_index;
gpr_uint32 parent = (gpr_uint32)(((int)i - 1) / 2);
uint32_t i = timer->heap_index;
uint32_t parent = (uint32_t)(((int)i - 1) / 2);
if (gpr_time_cmp(heap->timers[parent]->deadline, timer->deadline) < 0) {
adjust_upwards(heap->timers, i, timer);
} else {
@ -122,7 +122,7 @@ int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer) {
}
void grpc_timer_heap_remove(grpc_timer_heap *heap, grpc_timer *timer) {
gpr_uint32 i = timer->heap_index;
uint32_t i = timer->heap_index;
if (i == heap->timer_count - 1) {
heap->timer_count--;
maybe_shrink(heap);

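Aside: adjust_upwards above is a standard binary-heap sift-up with parent(i) = (i - 1) / 2 and children at 2i+1 and 2i+2. A self-contained sketch of the same index arithmetic on a plain int min-heap, not the grpc_timer type:

#include <stdint.h>
#include <stdio.h>

/* Move the element at index i toward the root until the min-heap property
   holds again. */
static void sift_up(int *heap, uint32_t i) {
  int v = heap[i];
  while (i > 0) {
    uint32_t parent = (uint32_t)(((int)i - 1) / 2);
    if (heap[parent] <= v) break;
    heap[i] = heap[parent];
    i = parent;
  }
  heap[i] = v;
}

int main(void) {
  int heap[5] = {1, 4, 3, 7, 0}; /* last element just inserted out of place */
  sift_up(heap, 4);
  for (int i = 0; i < 5; i++) printf("%d ", heap[i]);
  printf("\n"); /* expected: 0 1 3 7 4 */
  return 0;
}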
@ -38,8 +38,8 @@
typedef struct {
grpc_timer **timers;
gpr_uint32 timer_count;
gpr_uint32 timer_capacity;
uint32_t timer_count;
uint32_t timer_capacity;
} grpc_timer_heap;
/* return 1 if the new timer is the first timer in the heap */

@ -75,7 +75,7 @@ typedef struct {
grpc_fd *emfd;
grpc_udp_server *server;
union {
gpr_uint8 untyped[GRPC_MAX_SOCKADDR_SIZE];
uint8_t untyped[GRPC_MAX_SOCKADDR_SIZE];
struct sockaddr sockaddr;
struct sockaddr_un un;
} addr;

@ -43,17 +43,16 @@ static void json_reader_string_clear(grpc_json_reader *reader) {
reader->vtable->string_clear(reader->userdata);
}
static void json_reader_string_add_char(grpc_json_reader *reader,
gpr_uint32 c) {
static void json_reader_string_add_char(grpc_json_reader *reader, uint32_t c) {
reader->vtable->string_add_char(reader->userdata, c);
}
static void json_reader_string_add_utf32(grpc_json_reader *reader,
gpr_uint32 utf32) {
uint32_t utf32) {
reader->vtable->string_add_utf32(reader->userdata, utf32);
}
static gpr_uint32 grpc_json_reader_read_char(grpc_json_reader *reader) {
static uint32_t grpc_json_reader_read_char(grpc_json_reader *reader) {
return reader->vtable->read_char(reader->userdata);
}
@ -108,7 +107,7 @@ int grpc_json_reader_is_complete(grpc_json_reader *reader) {
}
grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
gpr_uint32 c, success;
uint32_t c, success;
/* This state-machine is a strict implementation of ECMA-404 */
for (;;) {
@ -154,7 +153,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
case GRPC_JSON_STATE_VALUE_NUMBER_WITH_DECIMAL:
case GRPC_JSON_STATE_VALUE_NUMBER_ZERO:
case GRPC_JSON_STATE_VALUE_NUMBER_EPM:
success = (gpr_uint32)json_reader_set_number(reader);
success = (uint32_t)json_reader_set_number(reader);
if (!success) return GRPC_JSON_PARSE_ERROR;
json_reader_string_clear(reader);
reader->state = GRPC_JSON_STATE_VALUE_END;
@ -181,7 +180,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
case GRPC_JSON_STATE_VALUE_NUMBER_WITH_DECIMAL:
case GRPC_JSON_STATE_VALUE_NUMBER_ZERO:
case GRPC_JSON_STATE_VALUE_NUMBER_EPM:
success = (gpr_uint32)json_reader_set_number(reader);
success = (uint32_t)json_reader_set_number(reader);
if (!success) return GRPC_JSON_PARSE_ERROR;
json_reader_string_clear(reader);
reader->state = GRPC_JSON_STATE_VALUE_END;
@ -416,8 +415,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
} else {
return GRPC_JSON_PARSE_ERROR;
}
reader->unicode_char = (gpr_uint16)(reader->unicode_char << 4);
reader->unicode_char = (gpr_uint16)(reader->unicode_char | c);
reader->unicode_char = (uint16_t)(reader->unicode_char << 4);
reader->unicode_char = (uint16_t)(reader->unicode_char | c);
switch (reader->state) {
case GRPC_JSON_STATE_STRING_ESCAPE_U1:
@ -440,13 +439,13 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
reader->unicode_high_surrogate = reader->unicode_char;
} else if ((reader->unicode_char & 0xfc00) == 0xdc00) {
/* low surrogate utf-16 */
gpr_uint32 utf32;
uint32_t utf32;
if (reader->unicode_high_surrogate == 0)
return GRPC_JSON_PARSE_ERROR;
utf32 = 0x10000;
utf32 += (gpr_uint32)(
utf32 += (uint32_t)(
(reader->unicode_high_surrogate - 0xd800) * 0x400);
utf32 += (gpr_uint32)(reader->unicode_char - 0xdc00);
utf32 += (uint32_t)(reader->unicode_char - 0xdc00);
json_reader_string_add_utf32(reader, utf32);
reader->unicode_high_surrogate = 0;
} else {

@ -84,11 +84,11 @@ typedef struct grpc_json_reader_vtable {
/* Clears your internal string scratchpad. */
void (*string_clear)(void *userdata);
/* Adds a char to the string scratchpad. */
void (*string_add_char)(void *userdata, gpr_uint32 c);
void (*string_add_char)(void *userdata, uint32_t c);
/* Adds a utf32 char to the string scratchpad. */
void (*string_add_utf32)(void *userdata, gpr_uint32 c);
void (*string_add_utf32)(void *userdata, uint32_t c);
/* Reads a character from your input. May be utf-8, 16 or 32. */
gpr_uint32 (*read_char)(void *userdata);
uint32_t (*read_char)(void *userdata);
/* Starts a container of type GRPC_JSON_ARRAY or GRPC_JSON_OBJECT. */
void (*container_begins)(void *userdata, grpc_json_type type);
/* Ends the current container. Must return the type of its parent. */
@ -117,7 +117,7 @@ typedef struct grpc_json_reader {
int in_array;
int escaped_string_was_key;
int container_just_begun;
gpr_uint16 unicode_char, unicode_high_surrogate;
uint16_t unicode_char, unicode_high_surrogate;
grpc_json_reader_state state;
} grpc_json_reader;

@ -56,10 +56,10 @@ typedef struct {
grpc_json *top;
grpc_json *current_container;
grpc_json *current_value;
gpr_uint8 *input;
gpr_uint8 *key;
gpr_uint8 *string;
gpr_uint8 *string_ptr;
uint8_t *input;
uint8_t *key;
uint8_t *string;
uint8_t *string_ptr;
size_t remaining_input;
} json_reader_userdata;
@ -122,36 +122,36 @@ static void json_reader_string_clear(void *userdata) {
state->string = state->string_ptr;
}
static void json_reader_string_add_char(void *userdata, gpr_uint32 c) {
static void json_reader_string_add_char(void *userdata, uint32_t c) {
json_reader_userdata *state = userdata;
GPR_ASSERT(state->string_ptr < state->input);
GPR_ASSERT(c <= 0xff);
*state->string_ptr++ = (gpr_uint8)c;
*state->string_ptr++ = (uint8_t)c;
}
/* We are converting a UTF-32 character into UTF-8 here,
* as described by RFC3629.
*/
static void json_reader_string_add_utf32(void *userdata, gpr_uint32 c) {
static void json_reader_string_add_utf32(void *userdata, uint32_t c) {
if (c <= 0x7f) {
json_reader_string_add_char(userdata, c);
} else if (c <= 0x7ff) {
gpr_uint32 b1 = 0xc0 | ((c >> 6) & 0x1f);
gpr_uint32 b2 = 0x80 | (c & 0x3f);
uint32_t b1 = 0xc0 | ((c >> 6) & 0x1f);
uint32_t b2 = 0x80 | (c & 0x3f);
json_reader_string_add_char(userdata, b1);
json_reader_string_add_char(userdata, b2);
} else if (c <= 0xffff) {
gpr_uint32 b1 = 0xe0 | ((c >> 12) & 0x0f);
gpr_uint32 b2 = 0x80 | ((c >> 6) & 0x3f);
gpr_uint32 b3 = 0x80 | (c & 0x3f);
uint32_t b1 = 0xe0 | ((c >> 12) & 0x0f);
uint32_t b2 = 0x80 | ((c >> 6) & 0x3f);
uint32_t b3 = 0x80 | (c & 0x3f);
json_reader_string_add_char(userdata, b1);
json_reader_string_add_char(userdata, b2);
json_reader_string_add_char(userdata, b3);
} else if (c <= 0x1fffff) {
gpr_uint32 b1 = 0xf0 | ((c >> 18) & 0x07);
gpr_uint32 b2 = 0x80 | ((c >> 12) & 0x3f);
gpr_uint32 b3 = 0x80 | ((c >> 6) & 0x3f);
gpr_uint32 b4 = 0x80 | (c & 0x3f);
uint32_t b1 = 0xf0 | ((c >> 18) & 0x07);
uint32_t b2 = 0x80 | ((c >> 12) & 0x3f);
uint32_t b3 = 0x80 | ((c >> 6) & 0x3f);
uint32_t b4 = 0x80 | (c & 0x3f);
json_reader_string_add_char(userdata, b1);
json_reader_string_add_char(userdata, b2);
json_reader_string_add_char(userdata, b3);
@ -162,8 +162,8 @@ static void json_reader_string_add_utf32(void *userdata, gpr_uint32 c) {
/* We consider that the input may be a zero-terminated string. So we
* can end up hitting eof before the end of the alleged string length.
*/
static gpr_uint32 json_reader_read_char(void *userdata) {
gpr_uint32 r;
static uint32_t json_reader_read_char(void *userdata) {
uint32_t r;
json_reader_userdata *state = userdata;
if (state->remaining_input == 0) return GRPC_JSON_READ_CHAR_EOF;
@ -302,7 +302,7 @@ grpc_json *grpc_json_parse_string_with_len(char *input, size_t size) {
state.top = state.current_container = state.current_value = NULL;
state.string = state.key = NULL;
state.string_ptr = state.input = (gpr_uint8 *)input;
state.string_ptr = state.input = (uint8_t *)input;
state.remaining_input = size;
grpc_json_reader_init(&reader, &reader_vtable, &state);

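Aside: json_reader_string_add_utf32 above emits RFC 3629 UTF-8 sequences of one to four bytes. A standalone sketch of that branch structure, returning the encoded bytes instead of appending them to the reader's scratchpad:

#include <stdint.h>
#include <stdio.h>

/* Encode one UTF-32 code point into UTF-8 (RFC 3629); returns the byte
   count written to out. */
static int utf32_to_utf8(uint32_t c, uint8_t out[4]) {
  if (c <= 0x7f) {
    out[0] = (uint8_t)c;
    return 1;
  } else if (c <= 0x7ff) {
    out[0] = (uint8_t)(0xc0 | ((c >> 6) & 0x1f));
    out[1] = (uint8_t)(0x80 | (c & 0x3f));
    return 2;
  } else if (c <= 0xffff) {
    out[0] = (uint8_t)(0xe0 | ((c >> 12) & 0x0f));
    out[1] = (uint8_t)(0x80 | ((c >> 6) & 0x3f));
    out[2] = (uint8_t)(0x80 | (c & 0x3f));
    return 3;
  } else {
    out[0] = (uint8_t)(0xf0 | ((c >> 18) & 0x07));
    out[1] = (uint8_t)(0x80 | ((c >> 12) & 0x3f));
    out[2] = (uint8_t)(0x80 | ((c >> 6) & 0x3f));
    out[3] = (uint8_t)(0x80 | (c & 0x3f));
    return 4;
  }
}

int main(void) {
  uint8_t buf[4];
  int n = utf32_to_utf8(0x1F600, buf); /* U+1F600: a 4-byte sequence */
  for (int i = 0; i < n; i++) printf("%02x ", buf[i]);
  printf("\n"); /* prints: f0 9f 98 80 */
  return 0;
}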
@ -100,8 +100,7 @@ static void json_writer_value_end(grpc_json_writer *writer) {
}
}
static void json_writer_escape_utf16(grpc_json_writer *writer,
gpr_uint16 utf16) {
static void json_writer_escape_utf16(grpc_json_writer *writer, uint16_t utf16) {
static const char hex[] = "0123456789abcdef";
json_writer_output_string_with_len(writer, "\\u", 2);
@ -116,7 +115,7 @@ static void json_writer_escape_string(grpc_json_writer *writer,
json_writer_output_char(writer, '"');
for (;;) {
gpr_uint8 c = (gpr_uint8)*string++;
uint8_t c = (uint8_t)*string++;
if (c == 0) {
break;
} else if ((c >= 32) && (c <= 126)) {
@ -144,7 +143,7 @@ static void json_writer_escape_string(grpc_json_writer *writer,
break;
}
} else {
gpr_uint32 utf32 = 0;
uint32_t utf32 = 0;
int extra = 0;
int i;
int valid = 1;
@ -162,7 +161,7 @@ static void json_writer_escape_string(grpc_json_writer *writer,
}
for (i = 0; i < extra; i++) {
utf32 <<= 6;
c = (gpr_uint8)(*string++);
c = (uint8_t)(*string++);
/* Break out and bail on any invalid UTF-8 sequence, including \0. */
if ((c & 0xc0) != 0x80) {
valid = 0;
@ -195,11 +194,10 @@ static void json_writer_escape_string(grpc_json_writer *writer,
* That range is exactly 20 bits.
*/
utf32 -= 0x10000;
json_writer_escape_utf16(writer, (gpr_uint16)(0xd800 | (utf32 >> 10)));
json_writer_escape_utf16(writer,
(gpr_uint16)(0xdc00 | (utf32 & 0x3ff)));
json_writer_escape_utf16(writer, (uint16_t)(0xd800 | (utf32 >> 10)));
json_writer_escape_utf16(writer, (uint16_t)(0xdc00 | (utf32 & 0x3ff)));
} else {
json_writer_escape_utf16(writer, (gpr_uint16)utf32);
json_writer_escape_utf16(writer, (uint16_t)utf32);
}
}
}

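Aside: the writer above splits code points beyond U+FFFF into a UTF-16 surrogate pair before emitting \uXXXX escapes; after subtracting 0x10000 the remaining range is exactly 20 bits. A minimal sketch that prints the escapes directly (the helper name is hypothetical, not the writer's API):

#include <stdint.h>
#include <stdio.h>

/* Print a JSON \uXXXX escape for a code point, splitting supplementary
   characters into a high/low surrogate pair. */
static void json_escape_codepoint(uint32_t utf32) {
  if (utf32 > 0xffff) {
    utf32 -= 0x10000; /* remaining range is exactly 20 bits */
    printf("\\u%04x", (unsigned)(0xd800 | (utf32 >> 10)));
    printf("\\u%04x", (unsigned)(0xdc00 | (utf32 & 0x3ff)));
  } else {
    printf("\\u%04x", (unsigned)utf32);
  }
}

int main(void) {
  json_escape_codepoint(0x1F600); /* prints \ud83d\ude00 */
  printf("\n");
  return 0;
}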
@ -52,7 +52,7 @@ typedef struct gpr_timer_entry {
const char *file;
short line;
char type;
gpr_uint8 important;
uint8_t important;
int thd;
} gpr_timer_entry;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -33,6 +33,7 @@
#include "src/core/security/base64.h"
#include <stdint.h>
#include <string.h>
#include <grpc/support/alloc.h>
@ -41,7 +42,7 @@
/* --- Constants. --- */
static const char base64_bytes[] = {
static const int8_t base64_bytes[] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
@ -114,7 +115,7 @@ char *grpc_base64_encode(const void *vdata, size_t data_size, int url_safe,
}
GPR_ASSERT(current >= result);
GPR_ASSERT((gpr_uintptr)(current - result) < result_projected_size);
GPR_ASSERT((uintptr_t)(current - result) < result_projected_size);
result[current - result] = '\0';
return result;
}
@ -125,14 +126,14 @@ gpr_slice grpc_base64_decode(const char *b64, int url_safe) {
static void decode_one_char(const unsigned char *codes, unsigned char *result,
size_t *result_offset) {
gpr_uint32 packed = ((gpr_uint32)codes[0] << 2) | ((gpr_uint32)codes[1] >> 4);
uint32_t packed = ((uint32_t)codes[0] << 2) | ((uint32_t)codes[1] >> 4);
result[(*result_offset)++] = (unsigned char)packed;
}
static void decode_two_chars(const unsigned char *codes, unsigned char *result,
size_t *result_offset) {
gpr_uint32 packed = ((gpr_uint32)codes[0] << 10) |
((gpr_uint32)codes[1] << 4) | ((gpr_uint32)codes[2] >> 2);
uint32_t packed = ((uint32_t)codes[0] << 10) | ((uint32_t)codes[1] << 4) |
((uint32_t)codes[2] >> 2);
result[(*result_offset)++] = (unsigned char)(packed >> 8);
result[(*result_offset)++] = (unsigned char)(packed);
}
@ -172,9 +173,8 @@ static int decode_group(const unsigned char *codes, size_t num_codes,
decode_two_chars(codes, result, result_offset);
} else {
/* No padding. */
gpr_uint32 packed = ((gpr_uint32)codes[0] << 18) |
((gpr_uint32)codes[1] << 12) |
((gpr_uint32)codes[2] << 6) | codes[3];
uint32_t packed = ((uint32_t)codes[0] << 18) | ((uint32_t)codes[1] << 12) |
((uint32_t)codes[2] << 6) | codes[3];
result[(*result_offset)++] = (unsigned char)(packed >> 16);
result[(*result_offset)++] = (unsigned char)(packed >> 8);
result[(*result_offset)++] = (unsigned char)(packed);

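Aside: decode_group above packs four 6-bit base64 codes into a 24-bit word and splits it into three output bytes. A standalone sketch of the no-padding case, using precomputed code values for the group "TWFu" (which decodes to "Man"):

#include <stdint.h>
#include <stdio.h>

/* Decode one full base64 group: four 6-bit codes -> 24-bit word -> 3 bytes. */
static void decode_full_group(const unsigned char codes[4],
                              unsigned char out[3]) {
  uint32_t packed = ((uint32_t)codes[0] << 18) | ((uint32_t)codes[1] << 12) |
                    ((uint32_t)codes[2] << 6) | codes[3];
  out[0] = (unsigned char)(packed >> 16);
  out[1] = (unsigned char)(packed >> 8);
  out[2] = (unsigned char)packed;
}

int main(void) {
  /* base64 values for 'T', 'W', 'F', 'u' */
  const unsigned char codes[4] = {19, 22, 5, 46};
  unsigned char out[3];
  decode_full_group(codes, out);
  printf("%c%c%c\n", out[0], out[1], out[2]); /* prints: Man */
  return 0;
}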
@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -60,7 +60,7 @@ typedef struct {
progress */
grpc_pollset *pollset;
grpc_transport_stream_op op;
gpr_uint8 security_context_set;
uint8_t security_context_set;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
grpc_auth_metadata_context auth_md_context;
} call_data;
@ -232,8 +232,8 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
}
sec_ctx = op->context[GRPC_CONTEXT_SECURITY].value;
GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
sec_ctx->auth_context = GRPC_AUTH_CONTEXT_REF(
chand->auth_context, "client_auth_filter");
sec_ctx->auth_context =
GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
}
if (op->send_initial_metadata != NULL) {

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -180,7 +180,7 @@ void grpc_server_credentials_set_auth_metadata_processor(
"grpc_server_credentials_set_auth_metadata_processor("
"creds=%p, "
"processor=grpc_auth_metadata_processor { process: %p, state: %p })",
3, (creds, (void*)(gpr_intptr)processor.process, processor.state));
3, (creds, (void *)(intptr_t)processor.process, processor.state));
if (creds == NULL) return;
if (creds->processor.destroy != NULL && creds->processor.state != NULL) {
creds->processor.destroy(creds->processor.state);

@ -215,8 +215,8 @@ static char *encoded_jwt_claim(const grpc_auth_json_key *json_key,
gpr_log(GPR_INFO, "Cropping token lifetime to maximum allowed value.");
expiration = gpr_time_add(now, grpc_max_auth_token_lifetime);
}
gpr_int64toa(now.tv_sec, now_str);
gpr_int64toa(expiration.tv_sec, expiration_str);
int64_ttoa(now.tv_sec, now_str);
int64_ttoa(expiration.tv_sec, expiration_str);
child =
create_child(NULL, json, "iss", json_key->client_email, GRPC_JSON_STRING);
@ -251,7 +251,7 @@ static char *dot_concat_and_free_strings(char *str1, char *str2) {
memcpy(current, str2, str2_len);
current += str2_len;
GPR_ASSERT(current >= result);
GPR_ASSERT((gpr_uintptr)(current - result) == result_len);
GPR_ASSERT((uintptr_t)(current - result) == result_len);
*current = '\0';
gpr_free(str1);
gpr_free(str2);

@ -443,8 +443,8 @@ static BIGNUM *bignum_from_base64(const char *b64) {
gpr_log(GPR_ERROR, "Invalid base64 for big num.");
return NULL;
}
result =
BN_bin2bn(GPR_SLICE_START_PTR(bin), TSI_SIZE_AS_SIZE(GPR_SLICE_LENGTH(bin)), NULL);
result = BN_bin2bn(GPR_SLICE_START_PTR(bin),
TSI_SIZE_AS_SIZE(GPR_SLICE_LENGTH(bin)), NULL);
gpr_slice_unref(bin);
return result;
}

@ -117,8 +117,8 @@ static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx,
static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); }
#endif
static void flush_read_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur,
gpr_uint8 **end) {
static void flush_read_staging_buffer(secure_endpoint *ep, uint8_t **cur,
uint8_t **end) {
gpr_slice_buffer_add(ep->read_buffer, ep->read_staging_buffer);
ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
*cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
@ -143,11 +143,11 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
unsigned i;
gpr_uint8 keep_looping = 0;
uint8_t keep_looping = 0;
tsi_result result = TSI_OK;
secure_endpoint *ep = (secure_endpoint *)user_data;
gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
gpr_uint8 *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);
uint8_t *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
uint8_t *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);
if (!success) {
gpr_slice_buffer_reset_and_unref(ep->read_buffer);
@ -158,7 +158,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
/* TODO(yangg) check error, maybe bail out early */
for (i = 0; i < ep->source_buffer.count; i++) {
gpr_slice encrypted = ep->source_buffer.slices[i];
gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(encrypted);
uint8_t *message_bytes = GPR_SLICE_START_PTR(encrypted);
size_t message_size = GPR_SLICE_LENGTH(encrypted);
while (message_size > 0 || keep_looping) {
@ -234,8 +234,8 @@ static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
&ep->on_read);
}
static void flush_write_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur,
gpr_uint8 **end) {
static void flush_write_staging_buffer(secure_endpoint *ep, uint8_t **cur,
uint8_t **end) {
gpr_slice_buffer_add(&ep->output_buffer, ep->write_staging_buffer);
ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE);
*cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
@ -247,8 +247,8 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
unsigned i;
tsi_result result = TSI_OK;
secure_endpoint *ep = (secure_endpoint *)secure_ep;
gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
gpr_uint8 *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
uint8_t *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
uint8_t *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
@ -263,7 +263,7 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
for (i = 0; i < slices->count; i++) {
gpr_slice plain = slices->slices[i];
gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(plain);
uint8_t *message_bytes = GPR_SLICE_START_PTR(plain);
size_t message_size = GPR_SLICE_LENGTH(plain);
while (message_size > 0) {
size_t protected_buffer_size_to_send = (size_t)(end - cur);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -316,8 +316,7 @@ grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
c->base.is_client_side = 1;
c->base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
c->base.vtable = &fake_channel_vtable;
c->request_metadata_creds =
grpc_call_credentials_ref(request_metadata_creds);
c->request_metadata_creds = grpc_call_credentials_ref(request_metadata_creds);
c->check_call_host = fake_channel_check_call_host;
return c;
}
@ -500,9 +499,10 @@ static grpc_security_status ssl_check_peer(grpc_security_connector *sc,
return GRPC_SECURITY_OK;
}
static void ssl_channel_check_peer(
grpc_exec_ctx *exec_ctx, grpc_security_connector *sc, tsi_peer peer,
grpc_security_peer_check_cb cb, void *user_data) {
static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc, tsi_peer peer,
grpc_security_peer_check_cb cb,
void *user_data) {
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
grpc_security_status status;
@ -516,9 +516,10 @@ static void ssl_channel_check_peer(
tsi_peer_destruct(&peer);
}
static void ssl_server_check_peer(
grpc_exec_ctx *exec_ctx, grpc_security_connector *sc, tsi_peer peer,
grpc_security_peer_check_cb cb, void *user_data) {
static void ssl_server_check_peer(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc, tsi_peer peer,
grpc_security_peer_check_cb cb,
void *user_data) {
grpc_auth_context *auth_context = NULL;
grpc_security_status status = ssl_check_peer(sc, NULL, &peer, &auth_context);
tsi_peer_destruct(&peer);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -40,10 +40,7 @@
/* --- status enum. --- */
typedef enum {
GRPC_SECURITY_OK = 0,
GRPC_SECURITY_ERROR
} grpc_security_status;
typedef enum { GRPC_SECURITY_OK = 0, GRPC_SECURITY_ERROR } grpc_security_status;
/* --- URL schemes. --- */

@ -42,8 +42,8 @@
/* Structure of a census op id. Define as structure because 64bit integer is not
available on every platform for C89. */
typedef struct census_op_id {
gpr_uint32 upper;
gpr_uint32 lower;
uint32_t upper;
uint32_t lower;
} census_op_id;
typedef struct census_rpc_stats census_rpc_stats;

@ -116,7 +116,7 @@ typedef struct census_log_block {
simultaneously by reader and writer. */
gpr_atm bytes_committed;
/* Bytes already read */
gpr_int32 bytes_read;
int32_t bytes_read;
/* Links for list */
cl_block_list_struct link;
/* We want this structure to be cacheline aligned. We assume the following
@ -124,7 +124,7 @@ typedef struct census_log_block {
type                   32b size   64b size
char*                  4          8
3x gpr_atm             12         24
gpr_int32              4          8 (assumes padding)
int32_t                4          8 (assumes padding)
cl_block_list_struct   12         24
TOTAL                  32         64
@ -147,7 +147,7 @@ typedef struct census_log_block {
/* A list of cl_blocks, doubly-linked through cl_block::link. */
typedef struct census_log_block_list {
gpr_int32 count; /* Number of items in list. */
int32_t count; /* Number of items in list. */
cl_block_list_struct ht; /* head/tail of linked list. */
} cl_block_list;
@ -175,7 +175,7 @@ struct census_log {
/* Number of cores (aka hardware-contexts) */
unsigned num_cores;
/* number of CENSUS_LOG_2_MAX_RECORD_SIZE blocks in log */
gpr_int32 num_blocks;
int32_t num_blocks;
cl_block *blocks; /* Block metadata. */
cl_core_local_block *core_local_blocks; /* Keeps core to block mappings. */
gpr_mu lock;
@ -183,7 +183,7 @@ struct census_log {
/* Keeps the state of the reader iterator. A value of 0 indicates that
iterator has reached the end. census_log_init_reader() resets the
value to num_core to restart iteration. */
gpr_uint32 read_iterator_state;
uint32_t read_iterator_state;
/* Points to the block being read. If non-NULL, the block is locked for
reading (block_being_read_->reader_lock is held). */
cl_block *block_being_read;
@ -276,11 +276,11 @@ static void cl_block_initialize(cl_block *block, char *buffer) {
/* Guards against exposing partially written buffer to the reader. */
static void cl_block_set_bytes_committed(cl_block *block,
gpr_int32 bytes_committed) {
int32_t bytes_committed) {
gpr_atm_rel_store(&block->bytes_committed, bytes_committed);
}
static gpr_int32 cl_block_get_bytes_committed(cl_block *block) {
static int32_t cl_block_get_bytes_committed(cl_block *block) {
return gpr_atm_acq_load(&block->bytes_committed);
}
@ -317,7 +317,7 @@ static void cl_block_enable_access(cl_block *block) {
/* Returns with writer_lock held. */
static void *cl_block_start_write(cl_block *block, size_t size) {
gpr_int32 bytes_committed;
int32_t bytes_committed;
if (!cl_try_lock(&block->writer_lock)) {
return NULL;
}
@ -395,8 +395,7 @@ static cl_block *cl_allocate_block(void) {
- allocated a new block OR
- 'core_id' => 'old_block' mapping changed (another thread allocated a
block before lock was acquired). */
static int cl_allocate_core_local_block(gpr_int32 core_id,
cl_block *old_block) {
static int cl_allocate_core_local_block(int32_t core_id, cl_block *old_block) {
/* Now that we have the lock, check if core-local mapping has changed. */
cl_core_local_block *core_local_block = &g_log.core_local_blocks[core_id];
cl_block *block = cl_core_local_block_get_block(core_local_block);
@ -418,8 +417,8 @@ static int cl_allocate_core_local_block(gpr_int32 core_id,
}
static cl_block *cl_get_block(void *record) {
gpr_uintptr p = (gpr_uintptr)((char *)record - g_log.buffer);
gpr_uintptr index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
uintptr_t p = (uintptr_t)((char *)record - g_log.buffer);
uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
return &g_log.blocks[index];
}
@ -460,7 +459,7 @@ static cl_block *cl_next_block_to_read(cl_block *prev) {
/* External functions: primary stats_log interface */
void census_log_initialize(size_t size_in_mb, int discard_old_records) {
gpr_int32 ix;
int32_t ix;
/* Check cacheline alignment. */
GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
@ -510,9 +509,9 @@ void census_log_shutdown(void) {
void *census_log_start_write(size_t size) {
/* Used to bound number of times block allocation is attempted. */
gpr_int32 attempts_remaining = g_log.num_blocks;
int32_t attempts_remaining = g_log.num_blocks;
/* TODO(aveitch): move this inside the do loop when current_cpu is fixed */
gpr_int32 core_id = gpr_cpu_current_cpu();
int32_t core_id = gpr_cpu_current_cpu();
GPR_ASSERT(g_log.initialized);
if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
return NULL;

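Aside: cl_get_block above recovers a block index from a record pointer by taking the byte offset into the log buffer and shifting by the block size's log2. A small sketch of that arithmetic with a hypothetical block size (the constant below is illustrative, not the census log's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define LOG_2_BLOCK_SIZE 14 /* hypothetical: 16 KiB blocks */

/* Map a record pointer inside a contiguous buffer of fixed-size blocks back
   to the index of the block that contains it. */
static size_t block_index(const char *buffer, const void *record) {
  uintptr_t offset = (uintptr_t)((const char *)record - (const char *)buffer);
  return (size_t)(offset >> LOG_2_BLOCK_SIZE);
}

int main(void) {
  static char buffer[4 << LOG_2_BLOCK_SIZE]; /* 4 blocks */
  const void *record = buffer + 3 * (1 << LOG_2_BLOCK_SIZE) + 123;
  printf("record lives in block %zu\n", block_index(buffer, record));
  return 0;
}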
@ -70,9 +70,9 @@ static int cmp_str_keys(const void *k1, const void *k2) {
}
/* TODO(hongyu): replace it with cityhash64 */
static gpr_uint64 simple_hash(const void *k) {
static uint64_t simple_hash(const void *k) {
size_t len = strlen(k);
gpr_uint64 higher = gpr_murmur_hash3((const char *)k, len / 2, 0);
uint64_t higher = gpr_murmur_hash3((const char *)k, len / 2, 0);
return higher << 32 |
gpr_murmur_hash3((const char *)k + len / 2, len - len / 2, 0);
}

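Aside: simple_hash above builds a 64-bit hash by hashing each half of the key to 32 bits and concatenating the results. A standalone sketch of that combination step; hash32 below is a stand-in for gpr_murmur_hash3, not the real function:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy 32-bit hash, used only so the sketch is self-contained. */
static uint32_t hash32(const char *data, size_t len, uint32_t seed) {
  uint32_t h = seed;
  for (size_t i = 0; i < len; i++) h = h * 31u + (uint8_t)data[i];
  return h;
}

/* Hash each half of the key and concatenate: high 32 bits from the first
   half, low 32 bits from the second half. */
static uint64_t simple_hash64(const char *k) {
  size_t len = strlen(k);
  uint64_t higher = hash32(k, len / 2, 0);
  return higher << 32 | hash32(k + len / 2, len - len / 2, 0);
}

int main(void) {
  printf("%llu\n", (unsigned long long)simple_hash64("grpc.census.key"));
  return 0;
}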
@ -42,9 +42,9 @@ extern "C" {
#endif
struct census_rpc_stats {
gpr_uint64 cnt;
gpr_uint64 rpc_error_cnt;
gpr_uint64 app_error_cnt;
uint64_t cnt;
uint64_t rpc_error_cnt;
uint64_t app_error_cnt;
double elapsed_time_ms;
double api_request_bytes;
double wire_request_bytes;

@ -68,14 +68,14 @@ static const census_ht_option ht_opt = {
static gpr_once g_init_mutex_once = GPR_ONCE_INIT;
static gpr_mu g_mu; /* Guards following two static variables. */
static census_ht *g_trace_store = NULL;
static gpr_uint64 g_id = 0;
static uint64_t g_id = 0;
static census_ht_key op_id_as_key(census_op_id *id) {
return *(census_ht_key *)id;
}
static gpr_uint64 op_id_2_uint64(census_op_id *id) {
gpr_uint64 ret;
static uint64_t op_id_2_uint64(census_op_id *id) {
uint64_t ret;
memcpy(&ret, id, sizeof(census_op_id));
return ret;
}

@ -54,29 +54,29 @@ typedef struct bucket {
/* NULL if bucket is empty */
ht_entry *next;
/* -1 if all buckets are empty. */
gpr_int32 prev_non_empty_bucket;
int32_t prev_non_empty_bucket;
/* -1 if all buckets are empty. */
gpr_int32 next_non_empty_bucket;
int32_t next_non_empty_bucket;
} bucket;
struct unresizable_hash_table {
/* Number of entries in the table */
size_t size;
/* Number of buckets */
gpr_uint32 num_buckets;
uint32_t num_buckets;
/* Array of buckets initialized at creation time. Memory consumption is
16 bytes per bucket on a 64-bit platform. */
bucket *buckets;
/* Index of the first non-empty bucket. -1 iff size == 0. */
gpr_int32 first_non_empty_bucket;
int32_t first_non_empty_bucket;
/* Index of the last non-empty bucket. -1 iff size == 0. */
gpr_int32 last_non_empty_bucket;
int32_t last_non_empty_bucket;
/* Immutable options of this hash table, initialized at creation time. */
census_ht_option options;
};
typedef struct entry_locator {
gpr_int32 bucket_idx;
int32_t bucket_idx;
int is_first_in_chain;
int found;
ht_entry *prev_entry;
@ -113,7 +113,7 @@ static void delete_entry(const census_ht_option *opt, ht_entry *p) {
gpr_free(p);
}
static gpr_uint64 hash(const census_ht_option *opt, census_ht_key key) {
static uint64_t hash(const census_ht_option *opt, census_ht_key key) {
return opt->key_type == CENSUS_HT_UINT64 ? key.val : opt->hash(key.ptr);
}
@ -135,7 +135,7 @@ census_ht *census_ht_create(const census_ht_option *option) {
return ret;
}
static gpr_int32 find_bucket_idx(const census_ht *ht, census_ht_key key) {
static int32_t find_bucket_idx(const census_ht *ht, census_ht_key key) {
return hash(&ht->options, key) % ht->num_buckets;
}
@ -149,7 +149,7 @@ static int keys_match(const census_ht_option *opt, const ht_entry *p,
static entry_locator ht_find(const census_ht *ht, census_ht_key key) {
entry_locator loc = {0, 0, 0, NULL};
gpr_int32 idx = 0;
int32_t idx = 0;
ht_entry *ptr = NULL;
GPR_ASSERT(ht != NULL);
idx = find_bucket_idx(ht, key);
@ -188,7 +188,7 @@ void *census_ht_find(const census_ht *ht, census_ht_key key) {
}
void census_ht_insert(census_ht *ht, census_ht_key key, void *data) {
gpr_int32 idx = find_bucket_idx(ht, key);
int32_t idx = find_bucket_idx(ht, key);
ht_entry *ptr = NULL;
entry_locator loc = ht_find(ht, key);
if (loc.found) {
@ -259,7 +259,7 @@ void census_ht_erase(census_ht *ht, census_ht_key key) {
census_ht_kv *census_ht_get_all_elements(const census_ht *ht, size_t *num) {
census_ht_kv *ret = NULL;
int i = 0;
gpr_int32 idx = -1;
int32_t idx = -1;
GPR_ASSERT(ht != NULL && num != NULL);
*num = ht->size;
if (*num == 0) {

@ -60,7 +60,7 @@ typedef struct unresizable_hash_table census_ht;
/* Currently, the hash_table can take two types of keys (uint64 for the trace
store and const char* for the stats store). */
typedef union {
gpr_uint64 val;
uint64_t val;
void *ptr;
} census_ht_key;
@ -73,10 +73,10 @@ typedef struct census_ht_option {
/* Type of hash key */
census_ht_key_type key_type;
/* Desired number of buckets, preferably a prime number */
gpr_int32 num_buckets;
int32_t num_buckets;
/* Function to calculate uint64 hash value of the key. Only takes effect if
key_type is POINTER. */
gpr_uint64 (*hash)(const void *);
uint64_t (*hash)(const void *);
/* Function to compare two keys, returns 0 iff equal. Only takes effect if
key_type is POINTER */
int (*compare_keys)(const void *k1, const void *k2);
@ -126,6 +126,6 @@ typedef void (*census_ht_itr_cb)(census_ht_key key, const void *val_ptr,
/* Iterates through all key-value pairs in the hash_table. The callback function
should not invalidate data entries. */
gpr_uint64 census_ht_for_all(const census_ht *ht, census_ht_itr_cb);
uint64_t census_ht_for_all(const census_ht *ht, census_ht_itr_cb);
#endif /* GRPC_INTERNAL_CORE_STATISTICS_HASH_TABLE_H */

@ -47,7 +47,7 @@ typedef struct census_window_stats_sum cws_sum;
/* Each interval is composed of a number of buckets, which hold a count of
entries and a single statistic */
typedef struct census_window_stats_bucket {
gpr_int64 count;
int64_t count;
void *statistic;
} cws_bucket;
@ -59,11 +59,11 @@ typedef struct census_window_stats_interval_stats {
/* Index of the bucket containing the smallest time interval. */
int bottom_bucket;
/* The smallest time storable in the current window. */
gpr_int64 bottom;
int64_t bottom;
/* The largest time storable in the current window + 1ns */
gpr_int64 top;
int64_t top;
/* The width of each bucket in ns. */
gpr_int64 width;
int64_t width;
} cws_interval_stats;
typedef struct census_window_stats {
@ -76,7 +76,7 @@ typedef struct census_window_stats {
/* Stats for each interval. */
cws_interval_stats *interval_stats;
/* The time the newest stat was recorded. */
gpr_int64 newest_time;
int64_t newest_time;
} window_stats;
/* Calculate an actual bucket index from a logical index 'IDX'. Other
@ -87,10 +87,9 @@ typedef struct census_window_stats {
/* The maximum seconds value we can have in a valid timespec. More than this
will result in overflow in timespec_to_ns(). This works out to ~292 years.
TODO: consider using doubles instead of int64. */
static gpr_int64 max_seconds =
(GPR_INT64_MAX - GPR_NS_PER_SEC) / GPR_NS_PER_SEC;
static int64_t max_seconds = (GPR_INT64_MAX - GPR_NS_PER_SEC) / GPR_NS_PER_SEC;
static gpr_int64 timespec_to_ns(const gpr_timespec ts) {
static int64_t timespec_to_ns(const gpr_timespec ts) {
if (ts.tv_sec > max_seconds) {
return GPR_INT64_MAX - 1;
}
@ -123,7 +122,7 @@ window_stats *census_window_stats_create(int nintervals,
GPR_ASSERT(nintervals > 0 && granularity > 2 && intervals != NULL &&
stat_info != NULL);
for (i = 0; i < nintervals; i++) {
gpr_int64 ns = timespec_to_ns(intervals[i]);
int64_t ns = timespec_to_ns(intervals[i]);
GPR_ASSERT(intervals[i].tv_sec >= 0 && intervals[i].tv_nsec >= 0 &&
intervals[i].tv_nsec < GPR_NS_PER_SEC && ns >= 100 &&
granularity * 10 <= ns);
@ -136,7 +135,7 @@ window_stats *census_window_stats_create(int nintervals,
ret->interval_stats =
(cws_interval_stats *)gpr_malloc(nintervals * sizeof(cws_interval_stats));
for (i = 0; i < nintervals; i++) {
gpr_int64 size_ns = timespec_to_ns(intervals[i]);
int64_t size_ns = timespec_to_ns(intervals[i]);
cws_interval_stats *is = ret->interval_stats + i;
cws_bucket *buckets = is->buckets =
(cws_bucket *)gpr_malloc(ret->nbuckets * sizeof(cws_bucket));
@ -169,7 +168,7 @@ window_stats *census_window_stats_create(int nintervals,
/* When we try adding a measurement above the current interval range, we
need to "shift" the buckets sufficiently to cover the new range. */
static void cws_shift_buckets(const window_stats *wstats,
cws_interval_stats *is, gpr_int64 when_ns) {
cws_interval_stats *is, int64_t when_ns) {
int i;
/* number of bucket time widths to "shift" */
int shift;
@ -194,7 +193,7 @@ static void cws_shift_buckets(const window_stats *wstats,
void census_window_stats_add(window_stats *wstats, const gpr_timespec when,
const void *stat_value) {
int i;
gpr_int64 when_ns = timespec_to_ns(when);
int64_t when_ns = timespec_to_ns(when);
GPR_ASSERT(wstats->interval_stats != NULL);
for (i = 0; i < wstats->nintervals; i++) {
cws_interval_stats *is = wstats->interval_stats + i;
@ -235,7 +234,7 @@ static void cws_add_proportion_to_sum(double p, cws_sum *sum,
void census_window_stats_get_sums(const window_stats *wstats,
const gpr_timespec when, cws_sum sums[]) {
int i;
gpr_int64 when_ns = timespec_to_ns(when);
int64_t when_ns = timespec_to_ns(when);
GPR_ASSERT(wstats->interval_stats != NULL);
for (i = 0; i < wstats->nintervals; i++) {
int when_bucket;
@ -264,7 +263,7 @@ void census_window_stats_get_sums(const window_stats *wstats,
when_bucket = (when_ns - is->bottom) / is->width;
new_bucket = (wstats->newest_time - is->bottom) / is->width;
if (new_bucket == when_bucket) {
gpr_int64 bottom_bucket_time = is->bottom + when_bucket * is->width;
int64_t bottom_bucket_time = is->bottom + when_bucket * is->width;
if (when_ns < wstats->newest_time) {
last_proportion = (double)(when_ns - bottom_bucket_time) /
(double)(wstats->newest_time - bottom_bucket_time);

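Aside: timespec_to_ns above clamps near INT64_MAX because roughly 292 years of nanoseconds exhausts a signed 64-bit value. A self-contained sketch of the conversion and the overflow guard; timespec_like stands in for gpr_timespec:

#include <stdint.h>
#include <stdio.h>

#define NS_PER_SEC 1000000000LL

typedef struct {
  int64_t tv_sec;
  int32_t tv_nsec;
} timespec_like;

/* Convert a timespec-style value to nanoseconds, clamping values whose
   seconds component would overflow the multiplication. */
static int64_t to_ns(timespec_like ts) {
  static const int64_t max_seconds = (INT64_MAX - NS_PER_SEC) / NS_PER_SEC;
  if (ts.tv_sec > max_seconds) return INT64_MAX - 1;
  return ts.tv_sec * NS_PER_SEC + ts.tv_nsec;
}

int main(void) {
  timespec_like ts = {3, 500000000};
  printf("%lld ns\n", (long long)to_ns(ts)); /* 3500000000 ns */
  return 0;
}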
@ -82,7 +82,7 @@ void *gpr_malloc_aligned(size_t size, size_t alignment_log) {
size_t alignment = ((size_t)1) << alignment_log;
size_t extra = alignment - 1 + sizeof(void *);
void *p = gpr_malloc(size + extra);
void **ret = (void **)(((gpr_uintptr)p + extra) & ~(alignment - 1));
void **ret = (void **)(((uintptr_t)p + extra) & ~(alignment - 1));
ret[-1] = p;
return (void *)ret;
}

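Aside: gpr_malloc_aligned above over-allocates, rounds the address up to a power-of-two boundary with uintptr_t arithmetic, and stores the original pointer just before the aligned block so it can be freed later. A standalone sketch of the same trick over plain malloc/free:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Return a block of `size` bytes aligned to 2^alignment_log bytes. The real
   allocation starts earlier; its address is stashed at ret[-1] for freeing. */
static void *malloc_aligned(size_t size, size_t alignment_log) {
  size_t alignment = ((size_t)1) << alignment_log;
  size_t extra = alignment - 1 + sizeof(void *);
  void *p = malloc(size + extra);
  if (p == NULL) return NULL;
  void **ret = (void **)(((uintptr_t)p + extra) & ~(alignment - 1));
  ret[-1] = p;
  return (void *)ret;
}

static void free_aligned(void *ptr) { free(((void **)ptr)[-1]); }

int main(void) {
  void *p = malloc_aligned(100, 6); /* 64-byte aligned */
  if (p == NULL) return 1;
  printf("aligned to 64: %s\n", ((uintptr_t)p & 63) == 0 ? "yes" : "no");
  free_aligned(p);
  return 0;
}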
@ -48,7 +48,7 @@ static long ncpus = 0;
static void init_ncpus() {
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
if (ncpus < 1 || ncpus > GPR_UINT32_MAX) {
if (ncpus < 1 || ncpus > UINT32_MAX) {
gpr_log(GPR_ERROR, "Cannot determine number of CPUs: assuming 1");
ncpus = 1;
}

@ -66,7 +66,7 @@ struct gpr_histogram {
/* number of buckets */
size_t num_buckets;
/* the buckets themselves */
gpr_uint32 *buckets;
uint32_t *buckets;
};
/* determine a bucket index given a value - does no bounds checking */
@ -102,8 +102,8 @@ gpr_histogram *gpr_histogram_create(double resolution,
h->num_buckets = bucket_for_unchecked(h, max_bucket_start) + 1;
GPR_ASSERT(h->num_buckets > 1);
GPR_ASSERT(h->num_buckets < 100000000);
h->buckets = gpr_malloc(sizeof(gpr_uint32) * h->num_buckets);
memset(h->buckets, 0, sizeof(gpr_uint32) * h->num_buckets);
h->buckets = gpr_malloc(sizeof(uint32_t) * h->num_buckets);
memset(h->buckets, 0, sizeof(uint32_t) * h->num_buckets);
return h;
}
@ -137,7 +137,7 @@ int gpr_histogram_merge(gpr_histogram *dst, const gpr_histogram *src) {
return 1;
}
void gpr_histogram_merge_contents(gpr_histogram *dst, const gpr_uint32 *data,
void gpr_histogram_merge_contents(gpr_histogram *dst, const uint32_t *data,
size_t data_count, double min_seen,
double max_seen, double sum,
double sum_of_squares, double count) {
@ -238,7 +238,7 @@ double gpr_histogram_sum_of_squares(gpr_histogram *h) {
return h->sum_of_squares;
}
const gpr_uint32 *gpr_histogram_get_contents(gpr_histogram *h, size_t *size) {
const uint32_t *gpr_histogram_get_contents(gpr_histogram *h, size_t *size) {
*size = h->num_buckets;
return h->buckets;
}

@ -45,7 +45,7 @@
#include <time.h>
#include <pthread.h>
static gpr_intptr gettid(void) { return (gpr_intptr)pthread_self(); }
static intptr_t gettid(void) { return (intptr_t)pthread_self(); }
void gpr_log(const char *file, int line, gpr_log_severity severity,
const char *format, ...) {

@ -46,19 +46,19 @@
handle aligned reads, do the conversion here */
#define GETBLOCK32(p, i) (p)[(i)]
gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
const gpr_uint8 *data = (const gpr_uint8 *)key;
uint32_t gpr_murmur_hash3(const void *key, size_t len, uint32_t seed) {
const uint8_t *data = (const uint8_t *)key;
const size_t nblocks = len / 4;
int i;
gpr_uint32 h1 = seed;
gpr_uint32 k1;
uint32_t h1 = seed;
uint32_t k1;
const gpr_uint32 c1 = 0xcc9e2d51;
const gpr_uint32 c2 = 0x1b873593;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
const gpr_uint32 *blocks = ((const gpr_uint32 *)key) + nblocks;
const gpr_uint8 *tail = (const gpr_uint8 *)(data + nblocks * 4);
const uint32_t *blocks = ((const uint32_t *)key) + nblocks;
const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
/* body */
for (i = -(int)nblocks; i; i++) {
@ -78,9 +78,9 @@ gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
/* tail */
switch (len & 3) {
case 3:
k1 ^= ((gpr_uint32)tail[2]) << 16;
k1 ^= ((uint32_t)tail[2]) << 16;
case 2:
k1 ^= ((gpr_uint32)tail[1]) << 8;
k1 ^= ((uint32_t)tail[1]) << 8;
case 1:
k1 ^= tail[0];
k1 *= c1;
@ -90,7 +90,7 @@ gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed) {
};
/* finalization */
h1 ^= (gpr_uint32)len;
h1 ^= (uint32_t)len;
FMIX32(h1);
return h1;
}

@ -39,6 +39,6 @@
#include <stddef.h>
/* compute the hash of key (length len) */
gpr_uint32 gpr_murmur_hash3(const void *key, size_t len, gpr_uint32 seed);
uint32_t gpr_murmur_hash3(const void *key, size_t len, uint32_t seed);
#endif /* GRPC_INTERNAL_CORE_SUPPORT_MURMUR_HASH_H */

@ -67,7 +67,7 @@ static gpr_slice_refcount noop_refcount = {noop_ref_or_unref,
gpr_slice gpr_slice_from_static_string(const char *s) {
gpr_slice slice;
slice.refcount = &noop_refcount;
slice.data.refcounted.bytes = (gpr_uint8 *)s;
slice.data.refcounted.bytes = (uint8_t *)s;
slice.data.refcounted.length = strlen(s);
return slice;
}
@ -203,13 +203,13 @@ gpr_slice gpr_slice_malloc(size_t length) {
/* The slices refcount points back to the allocated block. */
slice.refcount = &rc->base;
/* The data bytes are placed immediately after the refcount struct */
slice.data.refcounted.bytes = (gpr_uint8 *)(rc + 1);
slice.data.refcounted.bytes = (uint8_t *)(rc + 1);
/* And the length of the block is set to the requested length */
slice.data.refcounted.length = length;
} else {
/* small slice: just inline the data */
slice.refcount = NULL;
slice.data.inlined.length = (gpr_uint8)length;
slice.data.inlined.length = (uint8_t)length;
}
return slice;
}
@ -232,7 +232,7 @@ gpr_slice gpr_slice_sub_no_ref(gpr_slice source, size_t begin, size_t end) {
/* Enforce preconditions */
GPR_ASSERT(source.data.inlined.length >= end);
subset.refcount = NULL;
subset.data.inlined.length = (gpr_uint8)(end - begin);
subset.data.inlined.length = (uint8_t)(end - begin);
memcpy(subset.data.inlined.bytes, source.data.inlined.bytes + begin,
end - begin);
}
@ -244,7 +244,7 @@ gpr_slice gpr_slice_sub(gpr_slice source, size_t begin, size_t end) {
if (end - begin <= sizeof(subset.data.inlined.bytes)) {
subset.refcount = NULL;
subset.data.inlined.length = (gpr_uint8)(end - begin);
subset.data.inlined.length = (uint8_t)(end - begin);
memcpy(subset.data.inlined.bytes, GPR_SLICE_START_PTR(source) + begin,
end - begin);
} else {
@ -262,17 +262,17 @@ gpr_slice gpr_slice_split_tail(gpr_slice *source, size_t split) {
/* inlined data, copy it out */
GPR_ASSERT(source->data.inlined.length >= split);
tail.refcount = NULL;
tail.data.inlined.length = (gpr_uint8)(source->data.inlined.length - split);
tail.data.inlined.length = (uint8_t)(source->data.inlined.length - split);
memcpy(tail.data.inlined.bytes, source->data.inlined.bytes + split,
tail.data.inlined.length);
source->data.inlined.length = (gpr_uint8)split;
source->data.inlined.length = (uint8_t)split;
} else {
size_t tail_length = source->data.refcounted.length - split;
GPR_ASSERT(source->data.refcounted.length >= split);
if (tail_length < sizeof(tail.data.inlined.bytes)) {
/* Copy out the bytes - it'll be cheaper than refcounting */
tail.refcount = NULL;
tail.data.inlined.length = (gpr_uint8)tail_length;
tail.data.inlined.length = (uint8_t)tail_length;
memcpy(tail.data.inlined.bytes, source->data.refcounted.bytes + split,
tail_length);
} else {
@ -297,17 +297,17 @@ gpr_slice gpr_slice_split_head(gpr_slice *source, size_t split) {
GPR_ASSERT(source->data.inlined.length >= split);
head.refcount = NULL;
head.data.inlined.length = (gpr_uint8)split;
head.data.inlined.length = (uint8_t)split;
memcpy(head.data.inlined.bytes, source->data.inlined.bytes, split);
source->data.inlined.length =
(gpr_uint8)(source->data.inlined.length - split);
(uint8_t)(source->data.inlined.length - split);
memmove(source->data.inlined.bytes, source->data.inlined.bytes + split,
source->data.inlined.length);
} else if (split < sizeof(head.data.inlined.bytes)) {
GPR_ASSERT(source->data.refcounted.length >= split);
head.refcount = NULL;
head.data.inlined.length = (gpr_uint8)split;
head.data.inlined.length = (uint8_t)split;
memcpy(head.data.inlined.bytes, source->data.refcounted.bytes, split);
source->data.refcounted.bytes += split;
source->data.refcounted.length -= split;
