With device=0 the pipeline only uses CUDA device 0. Is there a way to use all of the GPUs on the SSH server?

import tensorflow as tf
from tqdm import tqdm
from transformers import pipeline

tf.debugging.set_log_device_placement(True)
gpus = tf.config.list_logical_devices('GPU')
strategy = tf.distribute.MirroredStrategy(gpus)

with strategy.scope():
    # zero-shot classifier pinned to the first GPU (device=0)
    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli", device=0)

    # run the classifier over each dataframe batch (labels and batch_df are defined elsewhere)
    def run_classifier(df_list):
        tqdm.pandas(desc='Processing Dataframe')
        for i in range(len(df_list)):
            df_list[i]['label'] = df_list[i]['Translation'].progress_apply(
                lambda x: classifier(x, candidate_labels=labels.candidate_labels, multi_label=True))
        return df_list

run_classifier(batch_df(df))
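
Note that tf.distribute.MirroredStrategy is a TensorFlow API, so wrapping the PyTorch-backed transformers pipeline in its scope does not spread the inference across GPUs. Below is a minimal data-parallel sketch, assuming the same labels, batch_df and df objects used above and at least one visible CUDA device: it builds one pipeline per GPU and gives each GPU its own share of the dataframe batches through a thread pool. The helper names (run_chunk, run_classifier_multi_gpu) are illustrative, not library functions.

import torch
from concurrent.futures import ThreadPoolExecutor
from transformers import pipeline

n_gpus = torch.cuda.device_count()

# one independent pipeline per CUDA device
classifiers = [
    pipeline("zero-shot-classification",
             model="facebook/bart-large-mnli",
             device=i)
    for i in range(n_gpus)
]

def run_chunk(gpu_id, frames):
    # each GPU's pipeline is used by exactly one thread
    clf = classifiers[gpu_id]
    for frame in frames:
        frame['label'] = frame['Translation'].apply(
            lambda x: clf(x, candidate_labels=labels.candidate_labels, multi_label=True))
    return frames

def run_classifier_multi_gpu(df_list):
    # split the batches into one chunk per GPU and run the chunks in parallel threads
    chunks = [df_list[i::n_gpus] for i in range(n_gpus)]
    with ThreadPoolExecutor(max_workers=n_gpus) as pool:
        futures = [pool.submit(run_chunk, i, chunk) for i, chunk in enumerate(chunks)]
        return [frame for fut in futures for frame in fut.result()]

results = run_classifier_multi_gpu(batch_df(df))

Threads give useful overlap here because the heavy GPU work releases the GIL; if that does not scale, the same split can be done with multiprocessing instead. If the goal is rather to fit a model that is too large for a single GPU, pipeline(..., device_map="auto") with accelerate installed shards the weights across devices, but that is model parallelism, not faster batch throughput.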
