Chapter 2 questions

@sgugger How to use a local VLM like llava via ollama in LiteLLMModel?

I tried:

from smolagents import LiteLLMModel, CodeAgent
model = LiteLLMModel(
    model_id="ollama/llava:7b",  # Or try other Ollama-supported models
    api_base="http://127.0.0.1:11434",  # Default Ollama local server
    num_ctx=8192,
)

# Create a simple agent to test instrumentation

agent = CodeAgent(
    tools=[],
    model=model,
)
response = agent.run(
    """
    Describe the image and generate a caption for it.
    """,
    images=[Image.open("/data/hmaurya/hmaurya/arpah_vis_web/temp_img.jpeg")],
)

but I am getting this error:

python app.py
╭─────────────────────────────────────────── New run ───────────────────────────────────────────╮
│ │
│ Describe the image and generate a caption for it. │
│ │
╰─ LiteLLMModel - ollama/llava:7b ──────────────────────────────────────────────────────────────╯
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Step 1 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Error in generating model output:
Cannot use images with flatten_messages_as_text=True
[Step 1: Duration 0.40 seconds]
Traceback (most recent call last):
  File "/data/hmaurya/hmaurya/anaconda3/envs/agentic_system/lib/python3.10/site-packages/smolagents/agents.py", line 1225, in step
    chat_message: ChatMessage = self.model(
  File "/data/hmaurya/hmaurya/anaconda3/envs/agentic_system/lib/python3.10/site-packages/openinference/instrumentation/smolagents/_wrappers.py", line 287, in __call__
    output_message = wrapped(*args, **kwargs)
  File "/data/hmaurya/hmaurya/anaconda3/envs/agentic_system/lib/python3.10/site-packages/smolagents/models.py", line 897, in __call__
    completion_kwargs = self._prepare_completion_kwargs(
  File "/data/hmaurya/hmaurya/anaconda3/envs/agentic_system/lib/python3.10/site-packages/smolagents/models.py", line 279, in _prepare_completion_kwargs
    messages = get_clean_message_list(
  File "/data/hmaurya/hmaurya/anaconda3/envs/agentic_system/lib/python3.10/site-packages/smolagents/models.py", line 202, in get_clean_message_list
    assert not flatten_messages_as_text, f"Cannot use images with {flatten_messages_as_text=}"
AssertionError: Cannot use images with flatten_messages_as_text=True

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/data/hmaurya/hmaurya/agentic_system/app.py", line 60, in <module>
    response = agent.run("""
  File "/data/hmaurya/hmaurya/anaconda3/envs/agentic_system/lib/python3.10/site-packages/openinference/instrumentation/smolagents/_wrappers.py", line 128, in __call__
    agent_output = wrapped(*args, **kwargs)
  File "/data/hmaurya/hmaurya/anaconda3/envs/agentic_system/lib/python3.10/site-packages/smolagents/agents.py", line 332, in run
    return deque(self._run(task=self.task, max_steps=max_steps, images=images), maxlen=1)[0].final_answer
  File "/data/hmaurya/hmaurya/anaconda3/envs/agentic_system/lib/python3.10/site-packages/smolagents/agents.py", line 356, in _run
    raise e
  File "/data/hmaurya/hmaurya/anaconda3/envs/agentic_system/lib/python3.10/site-packages/smolagents/agents.py", line 353, in _run
    final_answer = self._execute_step(task, action_step)
  File "/data/hmaurya/hmaurya/anaconda3/envs/agentic_system/lib/python3.10/site-packages/smolagents/agents.py", line 376, in _execute_step
    final_answer = self.step(memory_step)
  File "/data/hmaurya/hmaurya/anaconda3/envs/agentic_system/lib/python3.10/site-packages/openinference/instrumentation/smolagents/_wrappers.py", line 163, in __call__
    result = wrapped(*args, **kwargs)
  File "/data/hmaurya/hmaurya/anaconda3/envs/agentic_system/lib/python3.10/site-packages/smolagents/agents.py", line 1241, in step
    raise AgentGenerationError(f"Error in generating model output:\n{e}", self.logger) from e
smolagents.utils.AgentGenerationError: Error in generating model output:
Cannot use images with flatten_messages_as_text=True

1 Like