Not able to use the uploaded model in Hugging Face

I am new to Hugging Face. I have uploaded a model and am now trying to re-use it like this:
model = AutoModel.from_pretrained('LijinDurairaj/resume_evaluation_model')
but I am getting the following error:

OSError                                   Traceback (most recent call last)
/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py in cached_file(path_or_repo_id, filename, cache_dir, force_download, resume_download, proxies, token, revision, local_files_only, subfolder, repo_type, user_agent, _raise_exceptions_for_gated_repo, _raise_exceptions_for_missing_entries, _raise_exceptions_for_connection_errors, _commit_hash, **deprecated_kwargs)
    454         if revision is None:
    455             revision = "main"
--> 456         raise EnvironmentError(
    457             f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout "
    458             f"'https://huggingface.co/{path_or_repo_id}/tree/{revision}' for available files."

OSError: LijinDurairaj/resume_evaluation_model does not appear to have a file named config.json. Checkout 'https://huggingface.co/LijinDurairaj/resume_evaluation_model/tree/main' for available files.

My questions are:

  1. What is config.json? Please point me to some docs on how to use it.
  2. How do I resolve this error?

Hi @LijinDurairaj,
I don't know which document you're following, but Sharing pretrained models - Hugging Face NLP Course should help you save and share your model.

Your repo is missing the config.json file; that's why you can't re-use it. Normally, if you save your model using the .save_pretrained() method, it saves both the model weights and a config.json file in the specified directory.
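
A quick sketch of that (assuming model is a standard transformers PreTrainedModel; the directory name is illustrative):

model.save_pretrained("resume_evaluation_model")
#the directory now contains config.json plus the weights file
model = AutoModel.from_pretrained("resume_evaluation_model")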

If you have an uncommitted local version of it, you can follow Sharing pretrained models - Hugging Face NLP Course.

If your session with the model is still alive, you can save the config like this:

model.config.to_json_file("config.json")
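
You can then add that file to your existing Hub repo so from_pretrained can find it. A minimal sketch using huggingface_hub (the repo_id is taken from your error message):

from huggingface_hub import HfApi

api = HfApi()
#upload the regenerated config.json to the root of the model repo
api.upload_file(
    path_or_fileobj="config.json",
    path_in_repo="config.json",
    repo_id="LijinDurairaj/resume_evaluation_model",
)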

Thanks for the response @mahmutc.

I saved my model using the Trainer class:
trainer.push_to_hub('./resume_evaluation_model')

Here is my complete code:

from transformers import AutoTokenizer, AutoModel, TrainingArguments, Trainer
from datasets import load_dataset, Dataset, concatenate_datasets, DatasetDict
import numpy as np
import datasets
import evaluate
import torch
import torch.nn.functional as F
import torch.nn as nn


accuracy_metric = evaluate.load("accuracy")
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
base_model = AutoModel.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
resume_data=load_dataset('cnamuangtoun/resume-job-description-fit')

resume_data=resume_data.shuffle(seed=23)


unique_labels=set(resume_data['train']['label']) | set(resume_data['test']['label'])
unique_labels={label:index for index,label in enumerate(unique_labels)}



#optionally take only a small slice of the data to train (split currently commented out)
train=resume_data['train']#.train_test_split(test_size=0.02, seed=42)
test=resume_data['test']#.train_test_split(test_size=0.02, seed=42)

mod_data=DatasetDict({
    'train':train,
    'test':test
    })



class modelWithLossFunc(nn.Module):

  def __init__(self,base_model,num_labels):
    super(modelWithLossFunc,self).__init__()
    self.base_model=base_model
    #classification head on top of the pooled encoder output
    self.classifier=nn.Linear(base_model.config.hidden_size,num_labels)
    self.config=base_model.config

  def forward(self,input_ids,attention_mask,token_type_ids=None,labels=None):
    outputs=self.base_model(input_ids=input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids)
    #outputs[1] is the pooled [CLS] representation
    pooled_output=outputs[1]
    logits=self.classifier(pooled_output)

    loss=None
    if labels is not None:
      loss_fn=nn.CrossEntropyLoss()
      loss=loss_fn(logits.view(-1,self.classifier.out_features),labels.view(-1))
    #Trainer expects (loss, logits) when labels are provided
    return (loss,logits) if loss is not None else logits

num_labels=3
model=modelWithLossFunc(base_model,num_labels)
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)


def encode_label(row):
  row['label_encode']=unique_labels[row['label']]
  return row

def compute_metrics(p):
    predictions, labels = p
    preds = np.argmax(predictions, axis=1)
    accuracy = accuracy_metric.compute(predictions=preds, references=labels)
    return {
        "accuracy": accuracy['accuracy'],
    }

def tokenize_data(data):
  return tokenizer(data['resume_text'],data['job_description_text'],padding='max_length',truncation=True)


mod_data=mod_data.map(encode_label)
tokenized_data=mod_data.map(tokenize_data,batched=True)
mod_data=tokenized_data.rename_columns({'label_encode':'labels'})
mod_data=mod_data.select_columns(['input_ids','token_type_ids','attention_mask','labels'])


training_args=TrainingArguments(
  output_dir='resume_evaluation_model',
  learning_rate=2e-5,
  eval_strategy='epoch'
)
trainer=Trainer(
    model=model,
    args=training_args,
    train_dataset=mod_data['train'],
    eval_dataset=mod_data['test'],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics
)

trainer.train()


from huggingface_hub import notebook_login
notebook_login()

trainer.push_to_hub('./resume_evaluation_model')

Quoting from How to create a config.json after saving a model - #4 by BramVanroy:

instead of

class modelWithLossFunc(nn.Module):

use

from transformers.modeling_utils import PreTrainedModel
class modelWithLossFunc(PreTrainedModel):
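
To expand on that, a minimal sketch (untested; reusing the MiniLM checkpoint's config as the model's own config is an assumption) so that save_pretrained()/push_to_hub() also write config.json:

from transformers import AutoConfig, AutoModel, PreTrainedModel
import torch.nn as nn

config=AutoConfig.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')

class modelWithLossFunc(PreTrainedModel):
  config_class=type(config)  #BertConfig for this checkpoint

  def __init__(self,config,num_labels=3):
    super().__init__(config)
    self.base_model=AutoModel.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
    self.classifier=nn.Linear(config.hidden_size,num_labels)

  def forward(self,input_ids,attention_mask,token_type_ids=None,labels=None):
    outputs=self.base_model(input_ids=input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids)
    logits=self.classifier(outputs[1])  #pooled output -> class logits
    loss=None
    if labels is not None:
      loss=nn.CrossEntropyLoss()(logits.view(-1,self.classifier.out_features),labels.view(-1))
    return (loss,logits) if loss is not None else logits

model=modelWithLossFunc(config)
#after training, trainer.push_to_hub() now uploads config.json alongside the weights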

Thanks @mahmutc!