# GRPO training of Qwen2.5-VL-7B-Instruct with LoRA on the ChartQA (R1V-format) dataset,
# using 8 processes, vLLM for rollout generation, and the built-in accuracy and format rewards.
MAX_PIXELS=1003520 \
NPROC_PER_NODE=8 \
swift rlhf \
    --rlhf_type grpo \
    --model Qwen/Qwen2.5-VL-7B-Instruct \
    --train_type lora \
    --dataset AI-ModelScope/chartqa_digit_r1v_format \
    --torch_dtype bfloat16 \
    --system examples/train/grpo/prompt.txt \
    --num_train_epochs 1 \
    --max_length 2048 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --learning_rate 1e-6 \
    --save_total_limit 2 \
    --logging_steps 5 \
    --output_dir output \
    --gradient_accumulation_steps 1 \
    --warmup_ratio 0.05 \
    --dataloader_num_workers 4 \
    --max_completion_length 1024 \
    --reward_funcs accuracy format \
    --num_generations 8 \
    --use_vllm true \
    --vllm_gpu_memory_utilization 0.5 \
    --sleep_level 1 \
    --deepspeed zero3 \
    --num_infer_workers 8 \
    --tensor_parallel_size 4 \
    --temperature 1.0 \
    --top_p 0.85