Hello,
I've tried to implement docquery with different models like impira/layoutlm-invoices but I always have the same error (on Colab notebook or PyCharm, Windows or Ubuntu — always the same):
Code :
from docquery import document, pipeline
p = pipeline(task="document-question-answering", model="impira/layoutlm-invoices")
doc = document.load_document(file)
s = p(question="What is the invoice number?", **doc.context)
Error :
Exception Traceback (most recent call last)
Cell In[76], line 5
3 p = pipeline(task="document-question-answering", model="impira/layoutlm-invoices")
4 doc = document.load_document(file)
----> 5 s = p(question="What is the invoice number?", **doc.context)
File ~\Documents\GitHub\Llama\venv\lib\site-packages\docquery\ext\pipeline_document_question_answering.py:232, in DocumentQuestionAnsweringPipeline.__call__(self, image, question, **kwargs)
229 else:
230 normalized_images = [(image, None)]
--> 232 return super().__call__({"question": question, "pages": normalized_images}, **kwargs)
File ~\Documents\GitHub\Llama\venv\lib\site-packages\transformers\pipelines\base.py:1111, in Pipeline.__call__(self, inputs, num_workers, batch_size, *args, **kwargs)
1109 return self.iterate(inputs, preprocess_params, forward_params, postprocess_params)
1110 elif self.framework == "pt" and isinstance(self, ChunkPipeline):
--> 1111 return next(
1112 iter(
1113 self.get_iterator(
1114 [inputs], num_workers, batch_size, preprocess_params, forward_params, postprocess_params
1115 )
1116 )
1117 )
1118 else:
1119 return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
File ~\Documents\GitHub\Llama\venv\lib\site-packages\transformers\pipelines\pt_utils.py:124, in PipelineIterator.__next__(self)
121 return self.loader_batch_item()
123 # We're out of items within a batch
--> 124 item = next(self.iterator)
125 processed = self.infer(item, **self.params)
126 # We now have a batch of "inferred things".
File ~\Documents\GitHub\Llama\venv\lib\site-packages\transformers\pipelines\pt_utils.py:291, in PipelinePackIterator.__next__(self)
289 else:
290 item = processed
--> 291 is_last = item.pop("is_last")
292 accumulator.append(item)
293 return accumulator
File ~\Documents\GitHub\Llama\venv\lib\site-packages\transformers\utils\generic.py:310, in ModelOutput.pop(self, *args, **kwargs)
309 def pop(self, *args, **kwargs):
--> 310 raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
Exception: You cannot use ``pop`` on a ModelOutput instance.
Thank you for your help.