import os
from concurrent import futures

import grpc

import ai_pb2
import ai_pb2_grpc
import init


class AIServer(ai_pb2_grpc.LLMQueryServicer):
    """gRPC servicer for the LLMQuery service."""

    def QueryDocumentRequest(self, request, context):
        """Vectorize the request text and store it for the requesting user.

        Converts `request.text` to a vector via `init.text_to_vector` and
        inserts it with `init.insert_document` into `request.database`.
        Returns an `AddDocumentReply` whose `id` echoes the original text.
        """
        vector = init.text_to_vector(request.text)
        # NOTE(review): insert_document's return value is discarded and the
        # reply echoes request.text as the id — confirm whether the stored
        # document id should be returned instead.
        init.insert_document(request.user_id, vector, request.database)
        # NOTE(review): request.collection is received but never used here —
        # confirm whether insert_document should take it as well.
        return ai_pb2.AddDocumentReply(
            id=request.text
        )


def serve():
    """Start the gRPC server on $BIND (default [::]:50051) and block forever."""
    addr = os.getenv("BIND", "[::]:50051")
    print("Listening on", addr)
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    ai_pb2_grpc.add_LLMQueryServicer_to_server(AIServer(), server)
    server.add_insecure_port(addr)
    server.start()
    server.wait_for_termination()


if __name__ == "__main__":
    # Guarded so importing this module no longer starts the server as a
    # side effect (the original called serve() unconditionally at import).
    serve()