How to use DeepSparse with Transformers?

Here is my code. It basically wraps a Transformers model as a LangChain LLM.

from typing import List, Optional
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from transformers import AutoModel, AutoTokenizer


class LLMService(LLM):
    max_token: int = 10000
    temperature: float = 0.1
    top_p: float = 0.9
    history: list = []
    tokenizer: object = None
    model: object = None

    @property
    def _llm_type(self) -> str:
        return "LLM"

    def _call(self,
              prompt: str,
              stop: Optional[List[str]] = None) -> str:
        # Generate a reply with the underlying chat model
        response, _ = self.model.chat(
            self.tokenizer,
            prompt,
            history=self.history,
            max_length=self.max_token,
            temperature=self.temperature,
            top_p=self.top_p,
        )
        if stop is not None:
            response = enforce_stop_tokens(response, stop)
        # Keep the prompt/response pair so follow-up calls have context
        self.history = self.history + [[prompt, response]]
        return response

    def load_model(self, model_name_or_path: str = "ClueAI/ChatYuan-large-v2"):
        # Load tokenizer and model from the same checkpoint
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name_or_path,
            trust_remote_code=True
        )
        self.model = AutoModel.from_pretrained(model_name_or_path, trust_remote_code=True)
        self.model = self.model.eval()

if __name__ == '__main__':
    chatLLM = LLMService()
    chatLLM.load_model()

Usually, when we want to run an LLM with DeepSparse, we do it like this:

llm = DeepSparse(
    model=MODEL_PATH,
    model_config={"sequence_length": 2048, "trust_remote_code": True},
    generation_config={"max_new_tokens": 300},
)
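
and then generate with a plain call on the wrapper (a minimal sketch; the prompt text is just an illustration):

response = llm("Tell me a joke about sparse models.")
print(response)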

But in my case I use Transformers directly, so my question is: how can I use DeepSparse in my setup to optimize LLM inference on CPU?

@fifthwheel you can do something like this:

from typing import List, Optional
from langchain_community.llms import DeepSparse
from langchain.llms.utils import enforce_stop_tokens

class LLMService:
    model: object = None

    @property
    def _llm_type(self) -> str:
        return "deepsparse"

    def _call(self,
              prompt: str,
              stop: Optional[List[str]] = None) -> str:
        # Delegate generation to the DeepSparse pipeline
        response = self.model(prompt)
        if stop is not None:
            response = enforce_stop_tokens(response, stop)
        return response

    def load_model(self, model_name_or_path: str = "hf:neuralmagic/mpt-7b-chat-pruned50-quant"):
        # DeepSparse runs a sparsified/quantized checkpoint on CPU
        self.model = DeepSparse(
            model=model_name_or_path,
            model_config={"sequence_length": 2048},
            generation_config={"max_new_tokens": 300},
        )

if __name__ == '__main__':
    chatLLM = LLMService()
    chatLLM.load_model()
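
Once it's loaded you can query it the same way as before (a minimal usage sketch; the prompt is only an illustration):

response = chatLLM._call("What is sparsity-aware inference?")
print(response)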