Do not reload grpc in unit tests

This can break subsequently run tests, including any which have already
stored references to gRPC enums (such as grpc.StatusCode.OK). The
subsequent tests will now be comparing the old enums to the reloaded
reloaded enums, and they will not match. This causes errors in
_metadata_code_details_test and a hang in _metadata_flags_test, when run
in sequence locally after _logging_test.

It's unclear why this has been working on Kokoro, but it is reproducible
locally and is behavior that should be avoided.
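
For illustration only (not part of this commit), a minimal sketch of the failure mode: re-executing a module body, which is effectively what reload_module does, recreates its enum classes, so enum members captured before the reload no longer match the ones seen afterwards. The fake_grpc module below is a hypothetical stand-in for grpc.

    import types

    SRC = "import enum\nclass StatusCode(enum.Enum):\n    OK = 0\n"

    fake_grpc = types.ModuleType("fake_grpc")  # stand-in for the real grpc module
    exec(SRC, fake_grpc.__dict__)

    saved = fake_grpc.StatusCode.OK            # reference held by an earlier test

    # Re-executing the module body is roughly what reload_module(grpc) does:
    # the StatusCode enum class is recreated from scratch.
    exec(SRC, fake_grpc.__dict__)

    print(saved == fake_grpc.StatusCode.OK)    # False: members of two distinct enum classes
    print(saved is fake_grpc.StatusCode.OK)    # False
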
pull/17641/head
Eric Gribkoff 6 years ago
parent 2dda0bb21b
commit d79d2f1ca7
1 changed file: src/python/grpcio_tests/tests/unit/_logging_test.py (104 changed lines)

@@ -14,66 +14,86 @@
 """Test of gRPC Python's interaction with the python logging module"""
 
 import unittest
 import six
-from six.moves import reload_module
 import logging
 import grpc
-import functools
+import subprocess
 import sys
 
-
-def patch_stderr(f):
-
-    @functools.wraps(f)
-    def _impl(*args, **kwargs):
-        old_stderr = sys.stderr
-        sys.stderr = six.StringIO()
-        try:
-            f(*args, **kwargs)
-        finally:
-            sys.stderr = old_stderr
-
-    return _impl
-
-
-def isolated_logging(f):
-
-    @functools.wraps(f)
-    def _impl(*args, **kwargs):
-        reload_module(logging)
-        reload_module(grpc)
-        try:
-            f(*args, **kwargs)
-        finally:
-            reload_module(logging)
-
-    return _impl
+INTERPRETER = sys.executable
 
 
 class LoggingTest(unittest.TestCase):
 
-    @isolated_logging
     def test_logger_not_occupied(self):
-        self.assertEqual(0, len(logging.getLogger().handlers))
+        script = """if True:
+            import logging
+
+            import grpc
+
+            if len(logging.getLogger().handlers) != 0:
+                raise Exception('expected 0 logging handlers')
+        """
+        self._verifyScriptSucceeds(script)
 
-    @patch_stderr
-    @isolated_logging
     def test_handler_found(self):
-        self.assertEqual(0, len(sys.stderr.getvalue()))
+        script = """if True:
+            import logging
+
+            import grpc
+        """
+        out, err = self._verifyScriptSucceeds(script)
+        self.assertEqual(0, len(err), 'unexpected output to stderr')
 
-    @isolated_logging
     def test_can_configure_logger(self):
-        intended_stream = six.StringIO()
-        logging.basicConfig(stream=intended_stream)
-        self.assertEqual(1, len(logging.getLogger().handlers))
-        self.assertIs(logging.getLogger().handlers[0].stream, intended_stream)
+        script = """if True:
+            import logging
+            import six
+
+            import grpc
+
+            intended_stream = six.StringIO()
+            logging.basicConfig(stream=intended_stream)
+
+            if len(logging.getLogger().handlers) != 1:
+                raise Exception('expected 1 logging handler')
+
+            if logging.getLogger().handlers[0].stream is not intended_stream:
+                raise Exception('wrong handler stream')
+        """
+        self._verifyScriptSucceeds(script)
 
-    @isolated_logging
     def test_grpc_logger(self):
-        self.assertIn("grpc", logging.Logger.manager.loggerDict)
-        root_logger = logging.getLogger("grpc")
-        self.assertEqual(1, len(root_logger.handlers))
-        self.assertIsInstance(root_logger.handlers[0], logging.NullHandler)
+        script = """if True:
+            import logging
+
+            import grpc
+
+            if "grpc" not in logging.Logger.manager.loggerDict:
+                raise Exception('grpc logger not found')
+
+            root_logger = logging.getLogger("grpc")
+            if len(root_logger.handlers) != 1:
+                raise Exception('expected 1 root logger handler')
+            if not isinstance(root_logger.handlers[0], logging.NullHandler):
+                raise Exception('expected logging.NullHandler')
+        """
+        self._verifyScriptSucceeds(script)
+
+    def _verifyScriptSucceeds(self, script):
+        process = subprocess.Popen(
+            [INTERPRETER, '-c', script],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+        out, err = process.communicate()
+        self.assertEqual(
+            0, process.returncode,
+            'process failed with exit code %d (stdout: %s, stderr: %s)' %
+            (process.returncode, out, err))
+        return out, err
 
 
 if __name__ == '__main__':