Why am I getting this error when running any GGUF model, while the same code works fine with a .bin model?

from langchain.document_loaders.csv_loader import CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import CTransformers
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
import sys

DB_FAISS_PATH = 'vectorstore/db_faiss'
loader = CSVLoader(file_path='data/2019.csv', encoding='utf-8', csv_args={'delimiter': ','})
data = loader.load()
#print(data)

text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
text_chunks = text_splitter.split_documents(data)

print(len(text_chunks))
print(text_chunks)

embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')

docsearch = FAISS.from_documents(text_chunks, embeddings)
docsearch.save_local(DB_FAISS_PATH)

# query = 'what is the value of GDP per capita of Finland provided in the data'
# docs = docsearch.similarity_search(query, k=3)

# print("Result", docs)

llm = CTransformers(model="models/mistral-7b-v0.1.Q2_K.gguf",
                    model_type="llama",
                    max_new_tokens=512,
                    temperature=0.1)

qa = ConversationalRetrievalChain.from_llm(llm, retriever=docsearch.as_retriever())

while True:
    chat_history = []
    # query = "What is the value of GDP per capita of Finland provided in the data?"
    query = input("Input Prompt: ")
    if query == 'exit':
        print('Exiting')
        sys.exit()
    if query == '':
        continue
    result = qa({"question": query, "chat_history": chat_history})
    print("Response: ", result['answer'])

Terminal response:
error loading model: unknown (magic, version) combination: 46554747, 00000002; is this really a GGML file?
llama_init_from_file: failed to load model
Traceback (most recent call last):
  File "d:\Clarista\dockerLLma\script.py", line 31, in <module>
    llm = CTransformers(model="models/mistral-7b-v0.1.Q2_K.gguf",
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\aksha\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\load\serializable.py", line 74, in __init__
    super().__init__(**kwargs)
  File "pydantic\main.py", line 339, in pydantic.main.BaseModel.__init__
  File "pydantic\main.py", line 1102, in pydantic.main.validate_model
  File "C:\Users\aksha\AppData\Local\Programs\Python\Python311\Lib\site-packages\langchain\llms\ctransformers.py", line 70, in validate_environment
    values["client"] = AutoModelForCausalLM.from_pretrained(
                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\aksha\AppData\Local\Programs\Python\Python311\Lib\site-packages\ctransformers\hub.py", line 157, in from_pretrained
    return LLM(
           ^^^^
  File "C:\Users\aksha\AppData\Local\Programs\Python\Python311\Lib\site-packages\ctransformers\llm.py", line 205, in __init__
    raise RuntimeError(
RuntimeError: Failed to create LLM 'llama' from 'models/mistral-7b-v0.1.Q2_K.gguf'.

The model type should be "mistral", not "llama".
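
For what it's worth, the magic number in the error is just the little-endian reading of the bytes GGUF, so the file really is a GGUF model; the loader that printed the message only understands the older GGML format. A minimal sketch to confirm this, assuming the same model path as in the script above:

import struct

# Read the GGUF header: 4 magic bytes followed by a little-endian uint32 version.
with open("models/mistral-7b-v0.1.Q2_K.gguf", "rb") as f:
    magic = f.read(4)
    version = struct.unpack("<I", f.read(4))[0]

print(magic)    # b'GGUF' -> 0x46554747 as a little-endian uint32, i.e. the 46554747 in the error
print(version)  # 2 -> the 00000002 in the error

If that matches, the error is coming from a GGML-only loader, so the likely fix is a ctransformers release with GGUF support (0.2.24 or later, if I remember its changelog correctly) together with model_type="mistral".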