Cannot launch QA pipeline with google-t5/t5-large

Hello, I have a problem.
I wrote a QA bot and it works with HuggingFaceH4/zephyr-7b-beta, but the same code doesn't work with google-t5/t5-large.

Here are my general pipeline and the function I use to ask questions:

    def _create_general_pipeline(self):
        # Embed the query, retrieve matching documents, build the prompt, generate the answer.
        pipe = Pipeline()
        pipe.add_component("embedder", SentenceTransformersTextEmbedder(model="sentence-transformers/all-MiniLM-L6-v2"))
        pipe.add_component("retriever", ChromaEmbeddingRetriever(document_store=self.document_store))
        pipe.add_component("prompt_builder", PromptBuilder(template=self.template))
        pipe.add_component(
            "llm",
            HuggingFaceAPIGenerator(api_type=self.api_type, api_params={"model": self.model}, token=Secret.from_token(self.api_token)),
        )
        pipe.connect("embedder.embedding", "retriever.query_embedding")
        pipe.connect("retriever", "prompt_builder.documents")
        pipe.connect("prompt_builder", "llm")

        return pipe

    def ask(self, question):
        # Index the source files, then run the RAG pipeline with the question.
        self.preprocessing_pipeline.run({"file_type_router": {"sources": list(Path(self.output_dir).glob("**/*"))}})
        answer = self.genereal_pipeline.run(
            {
                "embedder": {"text": question},
                "prompt_builder": {"question": question},
                "llm": {"generation_kwargs": {"max_new_tokens": 250}},
            }
        )

        return answer

The exact same code works with HuggingFaceH4/zephyr-7b-beta but fails with google-t5/t5-large.
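
To take the retrieval part out of the picture, this is essentially how the generator ends up being configured (a stripped-down sketch; the serverless Inference API and the `hf_...` placeholder token stand in for my real `self.api_type` and `self.api_token`):

    from haystack.components.generators import HuggingFaceAPIGenerator
    from haystack.utils import Secret

    # Same generator as in the pipeline, just called on its own.
    llm = HuggingFaceAPIGenerator(
        api_type="serverless_inference_api",
        api_params={"model": "google-t5/t5-large"},  # the model name is the only thing I change between runs
        token=Secret.from_token("hf_..."),
    )
    result = llm.run(prompt="What is Haystack?", generation_kwargs={"max_new_tokens": 250})
    print(result["replies"])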

When I run the full pipeline with google-t5/t5-large, after the preprocessing/batching output the script prints the following:

    D:\projects\haystack2\.venv\Lib\site-packages\huggingface_hub\inference\_client.py:2198: UserWarning: API endpoint/model for text-generation is not served via TGI. Ignoring following parameters: details.
      warnings.warn(
    D:\projects\haystack2\.venv\Lib\site-packages\huggingface_hub\inference\_client.py:2204: UserWarning: API endpoint/model for text-generation is not served via TGI. Parameter `details=True` will be ignored meaning only the generated text will be returned.
      warnings.warn(
    Traceback (most recent call last):
      File "d:\projects\haystack2\setup_bot.py", line 107, in <module>
        answer = bot.ask(question)
                 ^^^^^^^^^^^^^^^^^
      File "d:\projects\haystack2\setup_bot.py", line 92, in ask
        answer = self.genereal_pipeline.run(
                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
      File "D:\projects\haystack2\.venv\Lib\site-packages\haystack\core\pipeline\pipeline.py", line 249, in run
        res: Dict[str, Any] = self._run_component(name, last_inputs[name])
                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      File "D:\projects\haystack2\.venv\Lib\site-packages\haystack\core\pipeline\pipeline.py", line 76, in _run_component
        res: Dict[str, Any] = instance.run(**inputs)
                              ^^^^^^^^^^^^^^^^^^^^^^
      File "D:\projects\haystack2\.venv\Lib\site-packages\haystack\components\generators\hugging_face_api.py", line 194, in run
        return self._run_non_streaming(prompt, generation_kwargs)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      File "D:\projects\haystack2\.venv\Lib\site-packages\haystack\components\generators\hugging_face_api.py", line 222, in _run_non_streaming
        "finish_reason": tgr.details.finish_reason if tgr.details else None,
                                                      ^^^^^^^^^^^
    AttributeError: 'str' object has no attribute 'details'
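
From the warnings it looks like google-t5/t5-large is not served via TGI, so `details=True` is ignored and `huggingface_hub` returns only the generated text as a plain string, while `_run_non_streaming` expects a response object with a `.details` attribute. If I understand it correctly, the type difference could be checked directly against `huggingface_hub` like this (a sketch; the prompt and the `hf_...` token are placeholders):

    from huggingface_hub import InferenceClient

    client = InferenceClient(model="google-t5/t5-large", token="hf_...")

    # According to the warnings above, for models not served via TGI the
    # `details=True` parameter is ignored and only the generated text comes
    # back, i.e. a plain str instead of an object with a .details attribute.
    out = client.text_generation(
        "translate English to German: How are you?",
        details=True,
        max_new_tokens=50,
    )
    print(type(out))

Is this expected, and is there a recommended way to use a text2text-generation model such as google-t5/t5-large with HuggingFaceAPIGenerator?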