Thank you very much for your prompt response — I did not expect to get an answer so quickly. However, I am now running into another issue, this time with the “max_length” parameter. Could you please help me resolve it? Here is the full traceback:
Traceback (most recent call last):
File “c:\Users\manto\Documents\medibot_rag\connect_with_memory_llm.py”, line 72, in
response=qa_chain.invoke({‘query’: user_query})
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\base.py”, line 167, in invoke
raise e
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\base.py”, line 157, in invoke
self._call(inputs, run_manager=run_manager)
~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\retrieval_qa\base.py”, line 154, in _call
answer = self.combine_documents_chain.run(
input_documents=docs, question=question, callbacks=_run_manager.get_child()
)
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain_core_api\deprecation.py”, line 191, in warning_emitting_wrapper
return wrapped(*args, **kwargs)
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\base.py”, line 608, in run
return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain_core_api\deprecation.py”, line 191, in warning_emitting_wrapper
return wrapped(*args, **kwargs)
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\base.py”, line 386, in call
return self.invoke(
~~~~~~~~~~~^
inputs,
^^^^^^^
…<2 lines>…
include_run_info=include_run_info,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\base.py”, line 167, in invoke
raise e
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\base.py”, line 157, in invoke
self._call(inputs, run_manager=run_manager)
~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\combine_documents\base.py”, line 138,
in _call
output, extra_return_dict = self.combine_docs(
~~~~~~~~~~~~~~~~~^
docs, callbacks=_run_manager.get_child(), **other_keys
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\combine_documents\stuff.py”, line 259, in combine_docs
return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\llm.py”, line 319, in predict
return self(kwargs, callbacks=callbacks)[self.output_key]
~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain_core_api\deprecation.py”, line 191, in warning_emitting_wrapper
return wrapped(*args, **kwargs)
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\base.py”, line 386, in call
return self.invoke(
~~~~~~~~~~~^
inputs,
^^^^^^^
…<2 lines>…
include_run_info=include_run_info,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\base.py”, line 167, in invoke
raise e
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\base.py”, line 157, in invoke
self._call(inputs, run_manager=run_manager)
~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\llm.py”, line 127, in _call
response = self.generate([inputs], run_manager=run_manager)
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain\chains\llm.py”, line 139, in generate
return self.llm.generate_prompt(
~~~~~~~~~~~~~~~~~~~~~~~~^
prompts,
^^^^^^^^
…<2 lines>…
**self.llm_kwargs,
^^^^^^^^^^^^^^^^^^
)
^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain_core\language_models\llms.py”, line 766, in generate_prompt
return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)
~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain_core\language_models\llms.py”, line 973, in generate
return self._generate_helper(
~~~~~~~~~~~~~~~~~~~~~^
prompts,
^^^^^^^^
…<3 lines>…
**kwargs,
^^^^^^^^^
)
^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain_core\language_models\llms.py”, line 792, in _generate_helper
self._generate(
~~~~~~~~~~~~~~^
prompts,
^^^^^^^^
…<3 lines>…
**kwargs,
^^^^^^^^^
)
^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain_core\language_models\llms.py”, line 1547, in
_generate
self._call(prompt, stop=stop, run_manager=run_manager, **kwargs)
~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “C:\Users\manto.virtualenvs\medibot_rag-BvpnU3Fv\Lib\site-packages\langchain_huggingface\llms\huggingface_endpoint.py”, line 312, in _call
response_text = self.client.text_generation(
prompt=prompt,
model=self.model,
**invocation_params,
)
TypeError: InferenceClient.text_generation() got an unexpected keyword argument ‘max_length’
1 Like