-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathgrpc_client.py
More file actions
113 lines (93 loc) · 3.65 KB
/
grpc_client.py
File metadata and controls
113 lines (93 loc) · 3.65 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
# Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of multiprocessing concurrency with gRPC."""
from __future__ import (
absolute_import,
division,
print_function,
)
import atexit
import logging
import multiprocessing
import operator
import sys
import time
import grpc # type: ignore[import]
import prime_pb2 # type: ignore[import]
import prime_pb2_grpc # type: ignore[import]
# Number of worker processes in the multiprocessing pool.
_PROCESS_COUNT = 20
# Exclusive upper bound of the integers whose primality is checked.
_MAXIMUM_CANDIDATE = 10000
# Each worker process initializes a single channel after forking.
# It's regrettable, but to ensure that each subprocess only has to instantiate
# a single channel to be reused across all RPCs, we use globals.
# Per-process gRPC channel, created lazily by _initialize_worker.
_worker_channel_singleton = None
# Per-process PrimeCheckerStub bound to the channel above.
_worker_stub_singleton = None
# Module-level logger; handlers are attached in the __main__ guard.
_LOGGER = logging.getLogger(__name__)
def _shutdown_worker():
    """Close this worker process's gRPC channel, if one was ever opened.

    Registered via atexit in _initialize_worker so each forked worker
    releases its channel when the process exits.
    """
    _LOGGER.info("Shutting worker process down.")
    channel = _worker_channel_singleton
    if channel is None:
        return
    channel.close()
def _initialize_worker(server_address):
    """Pool initializer: build this process's channel and stub singletons.

    Runs once in each freshly-forked worker. Creates one insecure channel
    to `server_address`, wraps it in a PrimeCheckerStub, stores both in
    module globals for reuse across all RPCs, and registers channel
    cleanup for process exit.
    """
    global _worker_channel_singleton  # pylint: disable=global-statement
    global _worker_stub_singleton  # pylint: disable=global-statement
    channel = grpc.insecure_channel(server_address)
    _worker_channel_singleton = channel
    _worker_stub_singleton = prime_pb2_grpc.PrimeCheckerStub(channel)
    atexit.register(_shutdown_worker)
def _run_worker_query(primality_candidate):
    """Issue one PrimeChecker.check RPC for `primality_candidate`.

    Uses the per-process stub created by _initialize_worker and returns
    the server's response message unchanged.
    """
    request = prime_pb2.PrimeCandidate(candidate=primality_candidate)
    return _worker_stub_singleton.check(request)
def _calculate_primes(server_address):
    """Check the primality of 2.._MAXIMUM_CANDIDATE-1 via a worker pool.

    Spawns _PROCESS_COUNT worker processes, each of which opens a single
    gRPC channel to `server_address` (see _initialize_worker), and fans
    the candidates out with Pool.map.

    Args:
        server_address: host:port string of the PrimeChecker server.

    Returns:
        A tuple of (candidate, is_prime) pairs for every candidate in
        range(2, _MAXIMUM_CANDIDATE).
    """
    # Use the pool as a context manager so worker processes are always
    # terminated when the work (or an exception) finishes. The original
    # code never closed the pool, leaking 20 processes per call.
    with multiprocessing.Pool(
        processes=_PROCESS_COUNT,
        initializer=_initialize_worker,
        initargs=(server_address,),
    ) as worker_pool:
        check_range = range(2, _MAXIMUM_CANDIDATE)
        primality = worker_pool.map(_run_worker_query, check_range)
    # Pair each candidate with the isPrime field of its RPC response.
    primes = zip(check_range, map(operator.attrgetter("isPrime"), primality))
    return tuple(primes)
def main(server_address="localhost:51317", iterations=17):
    """Benchmark _calculate_primes against a running PrimeChecker server.

    Calls _calculate_primes `iterations` times, printing per-call wall
    time, then prints the final result set and the total elapsed time.

    Args:
        server_address: host:port of the gRPC server (default preserves
            the original hard-coded address).
        iterations: number of benchmark repetitions (default preserves
            the original count of 17).
    """
    start_main = time.time()
    primes = ()
    for _ in range(iterations):
        start_call = time.time()
        primes = _calculate_primes(server_address)
        end_call = time.time()
        print(
            f"Execution time for _calculate_primes: "
            f"{end_call - start_call:.5f} seconds"
        )
    end_main = time.time()
    print(primes)
    print(f"Total execution time for main: {end_main - start_main:.5f} seconds")
if __name__ == "__main__":
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("[PID %(process)d] %(message)s")
handler.setFormatter(formatter)
_LOGGER.addHandler(handler)
_LOGGER.setLevel(logging.INFO)
main()