Error when loading an FSDP + LoRA checkpoint

  File "/home/wuzh/zd/GIT-Mol/crystal-text-llm-main/llama_finetune.py", line 522, in <module>
    main(args)
  File "/home/wuzh/zd/GIT-Mol/crystal-text-llm-main/llama_finetune.py", line 481, in main
    train_result = trainer.train(resume_from_checkpoint=args.resume_dir)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/transformers/trainer.py", line 1932, in train
    return inner_training_loop(
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/transformers/trainer.py", line 2268, in _inner_training_loop
    tr_loss_step = self.training_step(model, inputs)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/transformers/trainer.py", line 3307, in training_step
    loss = self.compute_loss(model, inputs)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/transformers/trainer.py", line 3338, in compute_loss
    outputs = model(**inputs)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/accelerate/utils/operations.py", line 819, in forward
    return model_forward(*args, **kwargs)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/accelerate/utils/operations.py", line 807, in __call__
    return convert_to_fp32(self.model_forward(*args, **kwargs))
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/torch/amp/autocast_mode.py", line 16, in decorate_autocast
    return func(*args, **kwargs)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/torch/_dynamo/eval_frame.py", line 328, in _fn
    return fn(*args, **kwargs)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/torch/_dynamo/external_utils.py", line 17, in inner
    return fn(*args, **kwargs)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py", line 823, in forward
    args, kwargs = _root_pre_forward(self, self, args, kwargs)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/torch/distributed/fsdp/_runtime_utils.py", line 558, in _root_pre_forward
    _lazy_init(state, module)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/torch/distributed/fsdp/_runtime_utils.py", line 173, in _lazy_init
    _share_state_and_init_handle_attrs(state, root_module)
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/torch/distributed/fsdp/_runtime_utils.py", line 261, in _share_state_and_init_handle_attrs
    _p_assert(
  File "/work/zd/anaconda/fsdp/lib/python3.9/site-packages/torch/distributed/utils.py", line 145, in _p_assert
    traceback.print_stack()
Non-root FSDP instance's _is_root should not have been set yet or should have been set to False

The error is raised when using train_result = trainer.train(resume_from_checkpoint=args.resume_dir) to load the checkpoint and resume training.
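For context, the fine-tuning setup looks roughly like the following. This is a minimal sketch, not the actual llama_finetune.py code: the base model name, LoRA hyperparameters, dataset, and FSDP options are placeholder assumptions; only the trainer.train(resume_from_checkpoint=args.resume_dir) call matches the script.

import argparse
from datasets import Dataset
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments

parser = argparse.ArgumentParser()
parser.add_argument("--resume_dir", type=str, default=None)
args = parser.parse_args()

model_name = "meta-llama/Llama-2-7b-hf"  # assumed base model, not necessarily the one used
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(model_name)

# Wrap the base model with LoRA adapters (hyperparameters are placeholders).
model = get_peft_model(
    model,
    LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM"),
)

# Tiny placeholder dataset so the sketch is self-contained.
def tokenize(batch):
    enc = tokenizer(batch["text"], truncation=True, max_length=64, padding="max_length")
    enc["labels"] = [ids.copy() for ids in enc["input_ids"]]
    return enc

train_dataset = Dataset.from_dict({"text": ["dummy sample"]}).map(
    tokenize, batched=True, remove_columns=["text"]
)

# FSDP is enabled through the Trainer; the job is launched with torchrun/accelerate
# across multiple GPUs, which is what produces the accelerate/FSDP frames in the traceback.
training_args = TrainingArguments(
    output_dir="outputs",
    per_device_train_batch_size=1,
    bf16=True,
    fsdp="full_shard auto_wrap",
)
trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)

# The assertion error above is raised inside this call when resuming:
train_result = trainer.train(resume_from_checkpoint=args.resume_dir)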
Here is the library code that raises the assertion, from torch/distributed/fsdp/_runtime_utils.py in _share_state_and_init_handle_attrs:
"_p_assert(
fsdp_state._is_root is None or not fsdp_state._is_root,
“Non-root FSDP instance’s _is_root should not have been "
“set yet or should have been set to False”,
)
fsdp_state._is_root = False
fsdp_state._unshard_stream = root_state._unshard_stream”
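As I understand the invariant this enforces: during the root FSDP instance's lazy initialization (the _lazy_init / _share_state_and_init_handle_attrs frames in the traceback), the root marks itself with _is_root = True and then expects every nested FSDP instance to still have _is_root unset (None) before marking it False. The assertion therefore fires if some nested FSDP instance was already lazily initialized as a root, for example because a forward or a state-dict operation ran on a wrapped submodule on its own before the root module's first training forward. Below is a simplified pure-Python sketch of that invariant (not the actual torch code):

# Simplified sketch (plain Python, not the torch implementation) of the invariant
# that _share_state_and_init_handle_attrs asserts during the root's lazy init.
class FakeFSDPState:
    def __init__(self, name):
        self.name = name
        self._is_root = None  # unset until some forward triggers lazy init

def lazy_init_as_root(root, nested_states):
    root._is_root = True  # the instance whose forward runs first becomes the root
    for child in nested_states:
        # The check that fails in the traceback: a nested instance must not
        # already have been initialized as a root.
        assert child._is_root is None or not child._is_root, (
            "Non-root FSDP instance's _is_root should not have been "
            "set yet or should have been set to False"
        )
        child._is_root = False

# Normal case: the root runs first, nested instances are still unset.
root, child = FakeFSDPState("model"), FakeFSDPState("model.layers.0")
lazy_init_as_root(root, [child])
print(root._is_root, child._is_root)  # True False

# Failure case: the nested instance was already initialized as a root
# (e.g. something ran on the wrapped submodule before the root's first forward),
# so the root's later lazy init hits exactly the assertion from the traceback.
root2, child2 = FakeFSDPState("model"), FakeFSDPState("model.layers.0")
child2._is_root = True
try:
    lazy_init_as_root(root2, [child2])
except AssertionError as err:
    print("reproduced:", err)

So the question is effectively: why does resuming an FSDP + LoRA checkpoint through trainer.train(resume_from_checkpoint=...) leave a nested FSDP instance with _is_root already set before the first training forward?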