Using a BertTokenizer when training a RobertaForMaskedLM

Hello, I want to train a RobertaForMaskedLM model from scratch. I need a character-level tokenizer, and I have already found one that fits my use case perfectly. So I am wondering:
can I re-use it?
This is my RobertaForMaskedLM config:

{"architectures": ["RobertaForMaskedLM"], 
"attention_probs_dropout_prob": 0.1, 
"hidden_act": "gelu",
 "hidden_dropout_prob": 0.1,
 "hidden_size": 768, "initializer_range": 0.02, 
"intermediate_size": 3072, 
"layer_norm_eps": 1e-05, 
"max_position_embeddings": 40000, 
"model_type": "roberta",
 "num_attention_heads": 12,
 "num_hidden_layers": 12,
 "type_vocab_size": 1,
 "vocab_size": 30} # edited to be the same as the tokenizer

This is the BertTokenizer config:

{
  "version": "1.0",
  "truncation": {
    "direction": "Right",
    "max_length": 512,
    "strategy": "LongestFirst",
    "stride": 0
  },
  "padding": null,
  "added_tokens": [
    {
      "id": 0,
      "content": "[PAD]",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 1,
      "content": "[UNK]",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 2,
      "content": "[CLS]",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 3,
      "content": "[SEP]",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 4,
      "content": "[MASK]",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    }
  ],
  "normalizer": {
    "type": "BertNormalizer",
    "clean_text": true,
    "handle_chinese_chars": true,
    "strip_accents": null,
    "lowercase": false
  },
  "pre_tokenizer": {
    "type": "BertPreTokenizer"
  },
  "post_processor": {
    "type": "TemplateProcessing",
    "single": [
      {
        "SpecialToken": {
          "id": "[CLS]",
          "type_id": 0
        }
      },
      {
        "Sequence": {
          "id": "A",
          "type_id": 0
        }
      },
      {
        "SpecialToken": {
          "id": "[SEP]",
          "type_id": 0
        }
      }
    ],
    "pair": [
      {
        "SpecialToken": {
          "id": "[CLS]",
          "type_id": 0
        }
      },
      {
        "Sequence": {
          "id": "A",
          "type_id": 0
        }
      },
      {
        "SpecialToken": {
          "id": "[SEP]",
          "type_id": 0
        }
      },
      {
        "Sequence": {
          "id": "B",
          "type_id": 1
        }
      },
      {
        "SpecialToken": {
          "id": "[SEP]",
          "type_id": 1
        }
      }
    ],
    "special_tokens": {
      "[CLS]": {
        "id": "[CLS]",
        "ids": [
          2
        ],
        "tokens": [
          "[CLS]"
        ]
      },
      "[SEP]": {
        "id": "[SEP]",
        "ids": [
          3
        ],
        "tokens": [
          "[SEP]"
        ]
      }
    }
  },
  "decoder": {
    "type": "WordPiece",
    "prefix": "##",
    "cleanup": true
  },
  "model": {
    "type": "WordPiece",
    "unk_token": "[UNK]",
    "continuing_subword_prefix": "##",
    "max_input_chars_per_word": 100,
    "vocab": {
      "[PAD]": 0,
      "[UNK]": 1,
      "[CLS]": 2,
      "[SEP]": 3,
      "[MASK]": 4,
      "L": 5,
      "A": 6,
      "G": 7,
      "V": 8,
      "E": 9,
      "S": 10,
      "I": 11,
      "K": 12,
      "R": 13,
      "D": 14,
      "T": 15,
      "P": 16,
      "N": 17,
      "Q": 18,
      "F": 19,
      "Y": 20,
      "M": 21,
      "H": 22,
      "C": 23,
      "W": 24,
      "X": 25,
      "U": 26,
      "B": 27,
      "Z": 28,
      "O": 29
    }
  }
}
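
This is roughly how I was planning to wire the two together, in case it matters. It is just a sketch: "char_tokenizer.json" stands for the tokenizer file shown above, and I am not sure whether PreTrainedTokenizerFast is the right wrapper for it.

from transformers import PreTrainedTokenizerFast, RobertaConfig, RobertaForMaskedLM

# Load the character-level tokenizer file shown above (placeholder path)
tokenizer = PreTrainedTokenizerFast(
    tokenizer_file="char_tokenizer.json",
    unk_token="[UNK]",
    pad_token="[PAD]",
    cls_token="[CLS]",
    sep_token="[SEP]",
    mask_token="[MASK]",
)

# Build the model from the config above, with vocab_size taken from the tokenizer
config = RobertaConfig(
    vocab_size=tokenizer.vocab_size,       # 30, same as the tokenizer
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    max_position_embeddings=40000,
    type_vocab_size=1,
)

model = RobertaForMaskedLM(config=config)
print(model.num_parameters())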

Thank you very much! If you know how to create a character-based tokenizer, please let me know.
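
In case it helps to see what I mean, something like this is what I am imagining with the tokenizers library. It is only a rough sketch and I have no idea if it is the right approach; "sequences.txt" is a placeholder for my training data.

from tokenizers import Tokenizer, Regex, models, pre_tokenizers, trainers, processors

special_tokens = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]

# Word-level model, but every "word" is a single character because the
# pre-tokenizer isolates each character as its own piece.
tokenizer = Tokenizer(models.WordLevel(unk_token="[UNK]"))
tokenizer.pre_tokenizer = pre_tokenizers.Split(Regex("."), behavior="isolated")

trainer = trainers.WordLevelTrainer(special_tokens=special_tokens)
tokenizer.train(files=["sequences.txt"], trainer=trainer)  # placeholder training file

# Wrap each sequence in [CLS] ... [SEP], like the post_processor shown above
tokenizer.post_processor = processors.TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[
        ("[CLS]", tokenizer.token_to_id("[CLS]")),
        ("[SEP]", tokenizer.token_to_id("[SEP]")),
    ],
)

tokenizer.save("char_tokenizer.json")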