I am facing the error below. Here is my code; please help me debug it.

raise RuntimeError(

RuntimeError: Failed to import transformers.models.blip.modeling_tf_blip because of the following error (look up to see its traceback):
module 'tensorflow._api.v2.compat.v2.__internal__' has no attribute 'register_load_context_function'

code:
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from dotenv import find_dotenv, load_dotenv
from transformers import pipeline
import requests
import os

# Load environment variables
load_dotenv(find_dotenv())

# os.getenv expects the variable NAME, not the token value itself; don't hard-code the token here.
# Assumes the .env file contains a line like: HUGGINGFACE_API_TOKEN=<your token>
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")

# Function for image to text conversion
def img2text(path):
    img_to_text = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
    text = img_to_text(path)[0]['generated_text']
    return text
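
The traceback above is raised while transformers tries to import its TensorFlow BLIP implementation (modeling_tf_blip), which usually points to an incompatible TensorFlow install rather than this code itself. One possible workaround, assuming PyTorch is installed, is to pin the pipeline to the PyTorch backend via the documented framework argument so the TensorFlow module is never loaded; a sketch, not guaranteed to fix every version mismatch:

from transformers import pipeline

# Force the PyTorch implementation of BLIP; "framework" is a standard pipeline argument.
# Assumes torch is installed; upgrading or uninstalling the broken TensorFlow build is the other option.
captioner = pipeline(
    "image-to-text",
    model="Salesforce/blip-image-captioning-base",
    framework="pt",
)
print(captioner("image.jpg")[0]["generated_text"])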

# Function for story generation
def story_generator(scenario):
    template = """
    You are an expert kids' story teller;
    you can generate short stories based on a simple narrative.
    Your story should be more than 50 words.

    CONTEXT: {scenario}
    STORY:
    """
    prompt = PromptTemplate(template=template, input_variables=["scenario"])
    # LLMChain expects an LLM object, not a bare model-name string
    # (ChatOpenAI reads OPENAI_API_KEY from the environment)
    story_llm = LLMChain(llm=ChatOpenAI(model_name="gpt-3.5-turbo"), prompt=prompt, verbose=True)

    story = story_llm.predict(scenario=scenario)
    return story

# Function for text to speech conversion
def text2speech(msg):
    API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
    headers = {"Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}"}
    payloads = {"inputs": msg}
    response = requests.post(API_URL, headers=headers, json=payloads)

    with open('audio.flac', 'wb') as f:
        f.write(response.content)
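
Not related to the traceback, but worth guarding: if the Inference API call fails, response.content will be a JSON error body and audio.flac will be written anyway. A minimal sketch with a status check, using the same URL and payload as above (text2speech_checked is just an illustrative name):

import requests

def text2speech_checked(msg, api_url, token):
    # Same Inference API call as above, with a status check before writing the file
    headers = {"Authorization": f"Bearer {token}"}
    response = requests.post(api_url, headers=headers, json={"inputs": msg})
    response.raise_for_status()  # raise on 4xx/5xx instead of silently writing an error body to disk
    with open("audio.flac", "wb") as f:
        f.write(response.content)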

# Main code execution
scenario = img2text(r"C:\Users\dnave\Desktop\sample pro\image.jpg")  # Convert image to text
story = story_generator(scenario)  # Generate a story
text2speech(story)  # Convert generated text to audio