tags=['Chroma', 'HuggingFaceEmbeddings'] vectorstore=<langchain_chroma.vectorstores.Chroma object at 0x7f3e9ef40e80> search_kwargs={'k': 3}
Using model: mistralai/Mistral-7B-v0.1
/home/user/app/app/recommender.py:84: LangChainDeprecationWarning: The method `Chain.__call__` was deprecated in langchain 0.1.0 and will be removed in 1.0. Use :meth:`~invoke` instead.
  result = qa_chain({"query": question})
Generation Error:
Traceback (most recent call last):
  File "/home/user/app/app/recommender.py", line 84, in retrieve_and_generate
    result = qa_chain({"query": question})
  File "/usr/local/lib/python3.10/site-packages/langchain_core/_api/deprecation.py", line 191, in warning_emitting_wrapper
    return wrapped(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 386, in __call__
    return self.invoke(
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 167, in invoke
    raise e
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 157, in invoke
    self._call(inputs, run_manager=run_manager)
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/retrieval_qa/base.py", line 154, in _call
    answer = self.combine_documents_chain.run(
  File "/usr/local/lib/python3.10/site-packages/langchain_core/_api/deprecation.py", line 191, in warning_emitting_wrapper
    return wrapped(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 608, in run
    return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
  File "/usr/local/lib/python3.10/site-packages/langchain_core/_api/deprecation.py", line 191, in warning_emitting_wrapper
    return wrapped(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 386, in __call__
    return self.invoke(
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 167, in invoke
    raise e
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 157, in invoke
    self._call(inputs, run_manager=run_manager)
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/combine_documents/base.py", line 138, in _call
    output, extra_return_dict = self.combine_docs(
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/combine_documents/stuff.py", line 259, in combine_docs
    return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/llm.py", line 319, in predict
    return self(kwargs, callbacks=callbacks)[self.output_key]
  File "/usr/local/lib/python3.10/site-packages/langchain_core/_api/deprecation.py", line 191, in warning_emitting_wrapper
    return wrapped(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 386, in __call__
    return self.invoke(
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 167, in invoke
    raise e
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 157, in invoke
    self._call(inputs, run_manager=run_manager)
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/llm.py", line 127, in _call
    response = self.generate([inputs], run_manager=run_manager)
  File "/usr/local/lib/python3.10/site-packages/langchain/chains/llm.py", line 139, in generate
    return self.llm.generate_prompt(
  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 764, in generate_prompt
    return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 971, in generate
    return self._generate_helper(
  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 790, in _generate_helper
    self._generate(
  File "/usr/local/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 1545, in _generate
    self._call(prompt, stop=stop, run_manager=run_manager, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/langchain_huggingface/llms/huggingface_endpoint.py", line 312, in _call
    response_text = self.client.text_generation(
  File "/usr/local/lib/python3.10/site-packages/huggingface_hub/inference/_client.py", line 2297, in text_generation
    provider_helper = get_provider_helper(self.provider, task="text-generation", model=model_id)
  File "/usr/local/lib/python3.10/site-packages/huggingface_hub/inference/_providers/__init__.py", line 165, in get_provider_helper
    provider = next(iter(provider_mapping))
StopIteration
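If I'm reading the last two frames right, `get_provider_helper()` takes the first entry of the model's inference-provider mapping, and for `mistralai/Mistral-7B-v0.1` that mapping comes back empty, i.e. no inference provider currently serves the base model over the serverless API (newer `huggingface_hub` releases seem to raise a clearer error here instead of a bare `StopIteration`, so upgrading may at least improve the message). Here is a minimal way I'd reproduce it outside LangChain; the instruct-model swap at the end is just my assumption of a workaround, not something from the app:

# Minimal reproduction outside LangChain, assuming a recent huggingface_hub
# that routes serverless calls through the inference-providers mapping.
import os
from huggingface_hub import InferenceClient

client = InferenceClient(
    model="mistralai/Mistral-7B-v0.1",  # same model as in the traceback
    token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
)
try:
    print(client.text_generation("Hello", max_new_tokens=10))
except StopIteration:
    # Same failure as above: the provider mapping for this model is empty.
    # Falling back to a provider-served instruct model (my guess at a fix):
    client = InferenceClient(
        model="mistralai/Mistral-7B-Instruct-v0.2",
        token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
    )
    print(client.text_generation("Hello", max_new_tokens=10))

For reference, the relevant code: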
import os
import re
import sys
import logging
import traceback

from langchain.chains import RetrievalQA
from langchain_chroma import Chroma
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_core.vectorstores import VectorStoreRetriever
from langchain_huggingface import HuggingFaceEmbeddings, HuggingFaceEndpoint
from langchain_ollama import OllamaLLM

# Project-local helpers (CustomException, CleanAndSaveToChromaDBC,
# TripAdviserDataPull, GoogleMapsDataPull, get_foodPlace_vector_path)
# are imported from elsewhere in the app.

def retrieve_and_generate(retriever, question, use_hf=True):
    prompt = ChatPromptTemplate.from_messages([
        SystemMessagePromptTemplate.from_template(
            "Use the following pieces of context to answer the question at the end. "
            "If you don't know the answer, just say you don't know; don't try to make up an answer."
        ),
        HumanMessagePromptTemplate.from_template(
            "Context:\n{context}\n\nQuestion:\n{question}\n\nHelpful Answer:"
        ),
    ])
    try:
        if use_hf:
            hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
            if not hf_token:
                raise EnvironmentError("Missing HUGGINGFACEHUB_API_TOKEN environment variable")
            llm = HuggingFaceEndpoint(
                repo_id="mistralai/Mistral-7B-v0.1",
                temperature=0.7,
                huggingfacehub_api_token=hf_token,
                # Mistral-7B-v0.1 is a plain text-generation model; the original
                # task="text2text-generation" did not match the model type.
                task="text-generation",
            )
        else:
            llm = OllamaLLM(model="llama2", temperature=0.7)
        print(f"Using model: {getattr(llm, 'repo_id', 'local model')}")
        qa_chain = RetrievalQA.from_chain_type(
            llm=llm,
            retriever=retriever,
            chain_type="stuff",
            chain_type_kwargs={"prompt": prompt},
            return_source_documents=True,
        )
        # RetrievalQA expects a "query" key; invoke() replaces the deprecated
        # Chain.__call__ that triggered the warning above.
        result = qa_chain.invoke({"query": question})
        return question, result["result"]
    except Exception as e:
        print(f"Generation Error: {e}")
        traceback.print_exc()
        return question, "Could not generate a response."
def load_retriever(foodPlace: str, persist_dir: str = "tmp/data/vector_store") -> VectorStoreRetriever:
    """
    Load the ChromaDB vector store and return a LangChain retriever.
    """
    try:
        logging.info("Data retriever started...")
        safe_foodPlace = re.sub(r"[^\w.-]", "_", foodPlace)
        chroma_path = os.path.join(persist_dir, safe_foodPlace)
        print(chroma_path)
        # Bug fix: os.path.join() always returns a truthy string, so the
        # original check never fired; os.path.exists() is what was intended.
        if not os.path.exists(chroma_path):
            raise ValueError(f"No vector store found at {chroma_path}")
        embedding = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        vector_db = Chroma(
            collection_name=safe_foodPlace,
            persist_directory=chroma_path,
            embedding_function=embedding,
        )
        retriever = vector_db.as_retriever(
            search_type="similarity",
            search_kwargs={"k": 3},
        )
        return retriever
    except Exception as e:
        raise CustomException(e, sys)
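While narrowing this down, the retriever can be checked on its own, with no LLM in the loop (the place name and query below are made up):

# Smoke test: if this returns documents, the Chroma store is fine and the
# failure is isolated to the HuggingFaceEndpoint call.
retriever = load_retriever(foodPlace="Some Cafe")  # hypothetical place name
docs = retriever.invoke("What do people say about the coffee?")
for d in docs:
    print(d.page_content[:120])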
def check_dB_data(foodPlace):
    # Data ingestion if the vector store doesn't exist yet
    vector_path = get_foodPlace_vector_path(foodPlace)
    if os.path.exists(vector_path):
        print(f"\n[INFO] Vector store already exists for {foodPlace}. Skipping data pull.")
    else:
        print(f"\n[INFO] Pulling data for {foodPlace}...")
        cleaner_store = CleanAndSaveToChromaDBC()
        try:
            tripa = TripAdviserDataPull(foodPlace=foodPlace)
            tripa_path = tripa.initiate_tripadviser_data_pull()
            cleaner_store.initiate_clean_chromadb(foodPlace=foodPlace, filepath=[tripa_path])
        except Exception as e:
            # Bare except also swallowed KeyboardInterrupt/SystemExit; log the cause instead.
            print(f"No TripAdvisor data: {e}")
        try:
            google = GoogleMapsDataPull(foodPlace=foodPlace)
            google_path = google.initiate_google_maps_data_pull()
            cleaner_store.initiate_clean_chromadb(foodPlace=foodPlace, filepath=[google_path])
        except Exception as e:
            print(f"No Google reviews data: {e}")

    # Load retriever
    print("\n[INFO] Loading retriever and querying...")
    retriever = load_retriever(foodPlace=foodPlace)
    print(retriever)
    return retriever
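For completeness, this is roughly how the pieces are driven end to end (place name and question are placeholders):

if __name__ == "__main__":
    retriever = check_dB_data("Some Cafe")  # placeholder place name
    question, answer = retrieve_and_generate(retriever, "What do reviewers say about the desserts?")
    print(answer)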
Need help with this error: why does `get_provider_helper` hit `StopIteration` for this model, and what is the right way to point `HuggingFaceEndpoint` at a model that is actually served?