nvidia-riva / python-clients

Riva Python client API and CLI utils
MIT License

Method not found #59

Open aldiazhar opened 8 months ago

aldiazhar commented 8 months ago

Hello, I am following the NVIDIA Riva ASR tutorial in Python on macOS, using a Jupyter Notebook.

However, I am encountering an issue with the offline_recognize function:

response = asr_service.offline_recognize(data, offline_config)
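For context, `data` and `offline_config` are built along the lines of the tutorial; roughly like the sketch below (host, audio file name, and config values are placeholders, not the exact notebook contents):

import riva.client

# Connect to the server the tutorial points the client at (placeholder URI)
auth = riva.client.Auth(uri="localhost:50051")
asr_service = riva.client.ASRService(auth)

# Offline (batch) recognition config; values here are illustrative only
offline_config = riva.client.RecognitionConfig(
    encoding=riva.client.AudioEncoding.LINEAR_PCM,
    sample_rate_hertz=16000,
    language_code="en-US",
    max_alternatives=1,
)

# Read the whole audio file as bytes (placeholder file name)
with open("audio_sample.wav", "rb") as fh:
    data = fh.read()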

I am getting the following error:

---------------------------------------------------------------------------
_InactiveRpcError                         Traceback (most recent call last)
Cell In[10], line 1
----> 1 response = asr_service.offline_recognize(data, offline_config)

File /usr/local/lib/python3.11/site-packages/riva/client/asr.py:380, in ASRService.offline_recognize(self, audio_bytes, config, future)
    378 request = rasr.RecognizeRequest(config=config, audio=audio_bytes)
    379 func = self.stub.Recognize.future if future else self.stub.Recognize
--> 380 return func(request, metadata=self.auth.get_auth_metadata())

File /usr/local/lib/python3.11/site-packages/grpc/_channel.py:1161, in _UnaryUnaryMultiCallable.__call__(self, request, timeout, metadata, credentials, wait_for_ready, compression)
   1146 def __call__(
   1147     self,
   1148     request: Any,
   (...)
   1153     compression: Optional[grpc.Compression] = None,
   1154 ) -> Any:
   1155     (
   1156         state,
   1157         call,
   1158     ) = self._blocking(
   1159         request, timeout, metadata, credentials, wait_for_ready, compression
   1160     )
-> 1161     return _end_unary_response_blocking(state, call, False, None)

File /usr/local/lib/python3.11/site-packages/grpc/_channel.py:1004, in _end_unary_response_blocking(state, call, with_call, deadline)
   1002         return state.response
   1003 else:
-> 1004     raise _InactiveRpcError(state)

_InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
    status = StatusCode.UNIMPLEMENTED
    details = "Method not found!"
    debug_error_string = "UNKNOWN:Error received from peer  {created_time:"2023-11-16T10:57:11.537789+07:00", grpc_status:12, grpc_message:"Method not found!"}"
>
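For what it's worth, StatusCode.UNIMPLEMENTED (grpc_status 12) generally means that the endpoint the client connected to does not register the Recognize RPC at all. One way to check which services are actually being served on the target port, if the server enables reflection as in the sample server.py below, is a reflection query; this is only a sketch using the standard gRPC reflection API:

# Sketch: list the services exposed by whatever is listening on localhost:50051.
# Works only if that server has gRPC reflection enabled.
import grpc
from grpc_reflection.v1alpha import reflection_pb2, reflection_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = reflection_pb2_grpc.ServerReflectionStub(channel)
    request = reflection_pb2.ServerReflectionRequest(list_services="")
    # ServerReflectionInfo is a bidirectional stream, so pass an iterator of requests
    for response in stub.ServerReflectionInfo(iter([request])):
        for service in response.list_services_response.service:
            print(service.name)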

server.py

import asyncio
import grpc
import sample_pb2
import sample_pb2_grpc
from grpc_reflection.v1alpha import reflection

class GreeterServicer(sample_pb2_grpc.GreeterServicer):
    async def SayHello(self, request, context):
        return sample_pb2.HelloReply(message=f"Hello, {request.name}!")

async def serve():
    server = grpc.aio.server()

    # Enable reflection
    SERVICE_NAMES = (
        sample_pb2.DESCRIPTOR.services_by_name['Greeter'].full_name,
        reflection.SERVICE_NAME,
    )
    reflection.enable_server_reflection(SERVICE_NAMES, server)

    sample_pb2_grpc.add_GreeterServicer_to_server(GreeterServicer(), server)

    server.add_insecure_port("[::]:50051")
    print("Server started on port 50051")
    await server.start()
    await server.wait_for_termination()

if __name__ == "__main__":
    asyncio.run(serve())
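For reference, a client for this sample server could look like the sketch below; it assumes the proto follows the standard gRPC helloworld layout (a HelloRequest message with a name field), which is not shown here:

import grpc
import sample_pb2
import sample_pb2_grpc

# Minimal client for the Greeter sample server above
with grpc.insecure_channel("localhost:50051") as channel:
    stub = sample_pb2_grpc.GreeterStub(channel)
    reply = stub.SayHello(sample_pb2.HelloRequest(name="Riva"))
    print(reply.message)

Calling an RPC that the target server never registered (for example, Recognize against this Greeter) is reported by gRPC as StatusCode.UNIMPLEMENTED, which matches the error above.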

Are there any suggestions or specific solutions for this issue, especially considering the use of macOS and Jupyter Notebook?

aldiazhar commented 8 months ago

I've identified an issue with the proto and server.py files, so I made changes to both. However, after the modifications, there is no response from:

response = asr_service.offline_recognize(data, offline_config)

and there's no error message either.

server.py

import asyncio
import grpc
import riva.proto.riva_asr_pb2 as riva_asr_pb2
import riva.proto.riva_asr_pb2_grpc as riva_asr_pb2_grpc

class RivaSpeechRecognitionServicer(riva_asr_pb2_grpc.RivaSpeechRecognitionServicer):
    async def Recognize(self, request, context):
        # Implement your logic for batch processing here
        response = riva_asr_pb2.RecognizeResponse()
        # Populate response with recognition results
        return response

    async def StreamingRecognize(self, request_iterator, context):
        # Implement your logic for streaming recognition here
        async for streaming_request in request_iterator:
            # Process streaming request and update recognition results
            response = riva_asr_pb2.StreamingRecognizeResponse()
            # Populate response with streaming recognition results
            yield response

    async def GetRivaSpeechRecognitionConfig(self, request, context):
        # Implement logic to provide ASR configuration
        response = riva_asr_pb2.RivaSpeechRecognitionConfigResponse()
        # Populate response with ASR configuration
        return response

async def serve():
    server = grpc.aio.server()

    riva_asr_pb2_grpc.add_RivaSpeechRecognitionServicer_to_server(RivaSpeechRecognitionServicer(), server)

    server.add_insecure_port("[::]:50051")
    print("Server started on port 50051")
    await server.start()
    await server.wait_for_termination()

if __name__ == "__main__":
    asyncio.run(serve())
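Since the Recognize stub above returns an empty RecognizeResponse, the client call can complete successfully while the result prints as empty, which looks like "no response". One way to confirm the round trip actually works is to return a dummy transcript; this is only a sketch, and the field names assume the riva_asr proto's RecognizeResponse -> results -> alternatives -> transcript layout:

import riva.proto.riva_asr_pb2 as riva_asr_pb2
import riva.proto.riva_asr_pb2_grpc as riva_asr_pb2_grpc

class DummyRecognitionServicer(riva_asr_pb2_grpc.RivaSpeechRecognitionServicer):
    async def Recognize(self, request, context):
        # Populate a single result/alternative so the client has something to print
        response = riva_asr_pb2.RecognizeResponse()
        result = response.results.add()
        alternative = result.alternatives.add()
        alternative.transcript = "dummy transcript from the test server"
        alternative.confidence = 1.0
        return response

With a transcript populated, the client should be able to print response.results[0].alternatives[0].transcript (assuming those field names), which makes it easier to tell a silently empty response apart from a failed call.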