Falcon 7B response toxicity check using trulens

The code I used is attached below:

# Query Falcon-7B-Instruct via the HuggingFace Hub and check the response
# for toxicity with TruLens (trulens_eval).
import os

# NOTE: replace the placeholders with your own key; never commit real credentials.
os.environ["HUGGINGFACE_API_KEY"] = '<YOUR_OWN_API_KEY>'
os.environ["HUGGINGFACEHUB_API_TOKEN"] = '<YOUR_OWN_API_KEY>'
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.prompts.chat import (ChatPromptTemplate, HumanMessagePromptTemplate)
from langchain import HuggingFaceHub
from langchain.chat_models import ChatOpenAI
from trulens_eval import TruChain

# Prompt template asking the model for a detailed, paragraph-length answer;
# {prompt} is filled in when the chain is invoked.
full_prompt = HumanMessagePromptTemplate(
    prompt=PromptTemplate(
        template="Please provide detailed helpful response with relevant background information for the following: {prompt}. Provide a complete paragraph of the response",
        input_variables=["prompt"],
    )
)
chat_prompt_template = ChatPromptTemplate.from_messages([full_prompt])

# FIX for the truncation: without an explicit generation-length limit the
# HF Inference API uses its (small) default, which cuts the response short.
# `max_new_tokens` raises the generation budget; `max_length` is included
# for models/endpoints that honor that parameter instead.
model = HuggingFaceHub(
    repo_id='tiiuae/falcon-7b-instruct',
    model_kwargs={
        "temperature": 0.5,
        "max_new_tokens": 512,  # allow a full paragraph instead of the API default
        "max_length": 512,
    },
)
chain = LLMChain(llm=model, prompt=chat_prompt_template)

from trulens_eval import Feedback, Huggingface, Query

# Feedback function: score the chain's output (RecordOutput) for non-toxicity
# using the HuggingFace moderation endpoint.
hugs = Huggingface()
f_toxicity = Feedback(hugs.not_toxic).on(text=Query.RecordOutput)

# Wrap the chain so every call is recorded and scored by the feedback function.
truchain = TruChain(chain, app_id="testapp_validation", feedbacks=[f_toxicity])
llm_response3 = truchain("What is Machine Learning and Artificial Intelligence")
display(llm_response3)  # `display` is IPython/Jupyter-only; use print() in a plain script

The response I got is truncated. Why is that so, and how can I get the complete response using this code?