---
datasets:
- IPEC-COMMUNITY/libero_10_no_noops_1.0.0_lerobot
base_model:
- nvidia/Eagle2-2B
tags:
- vision-language-model
- manipulation
- robotics
pipeline_tag: robotics
---

# Model Card for InstructVLA LIBERO-10

- checkpoints: the model in `.pt` format
- eval: the evaluation results with 3 random seeds
- dataset_statistics.json: the normalization statistics for the dataset (a quick inspection sketch is given at the end of this card)

## Evaluation:

```bash
#!/bin/bash

CKPT_LIST=(
  "path/to/checkpoints/step-025500-epoch-64-loss=0.0361.pt"
)

# Loop over the checkpoint list and GPUs
for i in "${!CKPT_LIST[@]}"; do
  GPU_ID=$((i % 8))  # Cycle through GPUs 0-7
  CHECKPOINT="${CKPT_LIST[$i]}"

  # Run the evaluation script for each checkpoint and GPU
  CUDA_VISIBLE_DEVICES=$GPU_ID python deploy/libero/run_libero_eval.py \
    --model_family instruct_vla \
    --pretrained_checkpoint "$CHECKPOINT" \
    --task_suite_name libero_10 \
    --local_log_dir Libero/release_ensemble \
    --use_length -1 \
    --center_crop True &
  # --use_length == -1 : execute the ensembled action
  # --use_length >= 1  : execute action_chunk[0:use_length]
  # For this checkpoint, you should use action ensemble.

  sleep 5
done

# Wait for all background jobs to finish
wait
```
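
## Inspecting `dataset_statistics.json`

The normalization statistics in `dataset_statistics.json` are the per-dimension values the policy uses to map between normalized and raw action/state values. The exact key layout inside the file is not documented in this card, so the sketch below makes no assumptions about it: it simply loads the JSON and prints the structure of each entry so you can see which statistics are available. The file path is assumed to be the current working directory; adjust as needed.

```python
import json

def summarize(node, prefix="dataset_statistics"):
    """Recursively print the structure of the statistics file."""
    if isinstance(node, dict):
        for key, value in node.items():
            summarize(value, f"{prefix}/{key}")
    elif isinstance(node, list):
        # Statistics vectors (e.g. per-action-dimension values) show up as lists.
        print(f"{prefix}: list of length {len(node)}")
    else:
        print(f"{prefix}: {node}")

# Assumes the file was downloaded alongside the checkpoint; change the path if not.
with open("dataset_statistics.json") as f:
    stats = json.load(f)

summarize(stats)
```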