Dataset Viewer
Auto-converted to Parquet

Schema of the split shown by the viewer:

| Column       | Type           | Stats                 |
|--------------|----------------|-----------------------|
| data_source  | string         | 1 unique value        |
| reward_model | dict           |                       |
| extra_info   | dict           |                       |
| prompt       | list           | length 2 in every row |
| ability      | string         | 1 unique value        |
| env_name     | string         | 1 unique value        |
| images       | list of images | length 1 in every row |

Each sample row below lists its fields in column order (data_source, reward_model, extra_info, prompt, ability, env_name); the images column is not rendered as text.
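Since the split is auto-converted to Parquet, it can be pulled straight through the `datasets` library. A minimal sketch, assuming a `train` split; the Hub repo id is not shown in this viewer dump, so the one below is a placeholder:

```python
from datasets import load_dataset

# Placeholder repo id: the viewer dump does not name the Hub repository.
ds = load_dataset("your-org/vstar-relative-position", split="train")

row = ds[0]
print(row["data_source"])                  # "vstar_relative_position"
print(row["reward_model"])                 # {"answer_letter": "A", "ground_truth": ...}
print(row["extra_info"]["item_id"])        # e.g. "sa_79958"
print([m["role"] for m in row["prompt"]])  # ["system", "user"]
print(len(row["images"]))                  # 1 image per row
```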
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The steel ladder is on the left side of the wagon." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_79958.jpg", "item_id": "sa_79958", "options": [ "The steel ladder is on the left side of the wagon.", "The steel ladder is on the right side of the wagon." ], "prompt_preset": "seal_zoom", "question": "Question: Is the steel ladder on the left or right side of the wagon?\nOptions: \nA. The steel ladder is on the left side of the wagon.\nB. The steel ladder is on the right side of the wagon.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the steel ladder on the left or right side of the wagon?\nOptions: \nA. The steel ladder is on the left side of the wagon.\nB. The steel ladder is on the right side of the wagon.\n\nOptions:\nA. The steel ladder is on the left side of the wagon.\nB. The steel ladder is on the right side of the wagon.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
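The system prompt in every row defines a single tool, `image_zoom_in_tool`, which crops a region given `bbox_2d = [x1, y1, x2, y2]`, an optional `label`, and a `target_image` index into the history of processed images. The dataset does not ship the tool's implementation; the following is only a sketch of the cropping semantics the schema describes, using Pillow:

```python
from PIL import Image

def image_zoom_in_tool(images, bbox_2d, label=None, target_image=-1):
    """Crop a region from one image in the processing history.

    images:   list of PIL images; index 0 is the original and each tool
              call appends its result, so positive indices mean
              "after the n-th tool call". Negative indices count from
              the end, matching the schema's default of -1.
    bbox_2d:  [x1, y1, x2, y2] with (x1, y1) the top-left corner.
    label:    optional object name; it does not affect the crop itself.
    """
    src = images[target_image]
    x1, y1, x2, y2 = bbox_2d
    crop = src.crop((x1, y1, x2, y2))
    images.append(crop)  # later calls can reference this crop by index
    return crop

# Illustrative usage on the first sample row (file name and bbox are
# placeholders; the bbox echoes the example in the system prompt):
history = [Image.open("sa_79958.jpg")]
region = image_zoom_in_tool(history, [50, 100, 200, 300],
                            label="steel ladder", target_image=0)
```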
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The red vehicle is on the right side of the sculpture." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_5465.jpg", "item_id": "sa_5465", "options": [ "The red vehicle is on the right side of the sculpture.", "The red vehicle is on the left side of the sculpture." ], "prompt_preset": "seal_zoom", "question": "Question: Is the red vehicle on the left or right side of the sculpture?\nOptions: \nA. The red vehicle is on the right side of the sculpture.\nB. The red vehicle is on the left side of the sculpture.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the red vehicle on the left or right side of the sculpture?\nOptions: \nA. The red vehicle is on the right side of the sculpture.\nB. The red vehicle is on the left side of the sculpture.\n\nOptions:\nA. The red vehicle is on the right side of the sculpture.\nB. The red vehicle is on the left side of the sculpture.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
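The prompts also pin down the wire format: the model returns a JSON object with `name` and `arguments` inside `<tool_call></tool_call>` tags. A small parser for pulling those calls out of a completion (a hypothetical helper, not part of the dataset):

```python
import json
import re

TOOL_CALL_RE = re.compile(r"<tool_call>\s*(\{.*?\})\s*</tool_call>", re.DOTALL)

def parse_tool_calls(completion: str):
    """Extract {"name": ..., "arguments": ...} objects from a model reply."""
    return [json.loads(m.group(1)) for m in TOOL_CALL_RE.finditer(completion)]

reply = (
    "<think>Need a closer look at the sculpture.</think> "
    '<tool_call>{"name": "image_zoom_in_tool", '
    '"arguments": {"bbox_2d": [50, 100, 200, 300], "target_image": 0}}</tool_call>'
)
calls = parse_tool_calls(reply)
assert calls[0]["name"] == "image_zoom_in_tool"
```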
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The cyclist is on the left side of the woman's handbag." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_23098.jpg", "item_id": "sa_23098", "options": [ "The cyclist is on the left side of the woman's handbag.", "The cyclist is on the right side of the woman's handbag." ], "prompt_preset": "seal_zoom", "question": "Question: Is the cyclist on the left or right side of the woman's handbag?\nOptions: \nA. The cyclist is on the left side of the woman's handbag.\nB. The cyclist is on the right side of the woman's handbag.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the cyclist on the left or right side of the woman's handbag?\nOptions: \nA. The cyclist is on the left side of the woman's handbag.\nB. The cyclist is on the right side of the woman's handbag.\n\nOptions:\nA. The cyclist is on the left side of the woman's handbag.\nB. The cyclist is on the right side of the woman's handbag.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
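Each `reward_model` dict carries the correct option letter, and the prompt requires the final choice inside `<answer></answer>`. How the environment actually computes the reward is not shown in this dump; a plausible exact-match sketch under that assumption:

```python
import re

def score_answer(completion: str, reward_model: dict) -> float:
    """1.0 if the option letter inside <answer>...</answer> matches answer_letter."""
    m = re.search(r"<answer>\s*([A-Z])", completion)
    if m is None:
        return 0.0
    return float(m.group(1) == reward_model["answer_letter"])

row_reward = {"answer_letter": "A",
              "ground_truth": "A. The cyclist is on the left side of the woman's handbag."}
print(score_answer("<answer>A. The cyclist is on the left side.</answer>", row_reward))  # 1.0
```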
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The volleyball is on the left side of the man with white cap." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_53377.jpg", "item_id": "sa_53377", "options": [ "The volleyball is on the left side of the man with white cap.", "The volleyball is on the right side of the man with white cap." ], "prompt_preset": "seal_zoom", "question": "Question: Is the volleyball on the left or right side of the man with white cap?\nOptions: \nA. The volleyball is on the left side of the man with white cap.\nB. The volleyball is on the right side of the man with white cap.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the volleyball on the left or right side of the man with white cap?\nOptions: \nA. The volleyball is on the left side of the man with white cap.\nB. The volleyball is on the right side of the man with white cap.\n\nOptions:\nA. The volleyball is on the left side of the man with white cap.\nB. The volleyball is on the right side of the man with white cap.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The dog is on the right side of the crosswalk." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_5613.jpg", "item_id": "sa_5613", "options": [ "The dog is on the right side of the crosswalk.", "The dog is on the left side of the crosswalk." ], "prompt_preset": "seal_zoom", "question": "Question: Is the dog on the left or right side of the crosswalk?\nOptions: \nA. The dog is on the right side of the crosswalk.\nB. The dog is on the left side of the crosswalk.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the dog on the left or right side of the crosswalk?\nOptions: \nA. The dog is on the right side of the crosswalk.\nB. The dog is on the left side of the crosswalk.\n\nOptions:\nA. The dog is on the right side of the crosswalk.\nB. The dog is on the left side of the crosswalk.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The red suitcase is on the left side of the red stool." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_57152.jpg", "item_id": "sa_57152", "options": [ "The red suitcase is on the left side of the red stool.", "The red suitcase is on the right side of the red stool." ], "prompt_preset": "seal_zoom", "question": "Question: Is the red suitcase on the left or right side of the red stool?\nOptions: \nA. The red suitcase is on the left side of the red stool.\nB. The red suitcase is on the right side of the red stool.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the red suitcase on the left or right side of the red stool?\nOptions: \nA. The red suitcase is on the left side of the red stool.\nB. The red suitcase is on the right side of the red stool.\n\nOptions:\nA. The red suitcase is on the left side of the red stool.\nB. The red suitcase is on the right side of the red stool.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The white truck is on the right side of the red truck." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_65804.jpg", "item_id": "sa_65804", "options": [ "The white truck is on the right side of the red truck.", "The white truck is on the left side of the red truck." ], "prompt_preset": "seal_zoom", "question": "Question: Is the white truck on the left or right side of the red truck?\nOptions: \nA. The white truck is on the right side of the red truck.\nB. The white truck is on the left side of the red truck.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the white truck on the left or right side of the red truck?\nOptions: \nA. The white truck is on the right side of the red truck.\nB. The white truck is on the left side of the red truck.\n\nOptions:\nA. The white truck is on the right side of the red truck.\nB. The white truck is on the left side of the red truck.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The yellow umbrella is on the left side of the pink umbrella." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_40555.jpg", "item_id": "sa_40555", "options": [ "The yellow umbrella is on the left side of the pink umbrella.", "The yellow umbrella is on the right side of the pink umbrella." ], "prompt_preset": "seal_zoom", "question": "Question: Is the yellow umbrella on the left or right side of the pink umbrella?\nOptions: \nA. The yellow umbrella is on the left side of the pink umbrella.\nB. The yellow umbrella is on the right side of the pink umbrella.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the yellow umbrella on the left or right side of the pink umbrella?\nOptions: \nA. The yellow umbrella is on the left side of the pink umbrella.\nB. The yellow umbrella is on the right side of the pink umbrella.\n\nOptions:\nA. The yellow umbrella is on the left side of the pink umbrella.\nB. The yellow umbrella is on the right side of the pink umbrella.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The soccer ball is on the right side of the water dispenser." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_40188.jpg", "item_id": "sa_40188", "options": [ "The soccer ball is on the right side of the water dispenser.", "The soccer ball is on the left side of the water dispenser." ], "prompt_preset": "seal_zoom", "question": "Question: Is the soccer ball on the left or right side of the water dispenser?\nOptions: \nA. The soccer ball is on the right side of the water dispenser.\nB. The soccer ball is on the left side of the water dispenser.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the soccer ball on the left or right side of the water dispenser?\nOptions: \nA. The soccer ball is on the right side of the water dispenser.\nB. The soccer ball is on the left side of the water dispenser.\n\nOptions:\nA. The soccer ball is on the right side of the water dispenser.\nB. The soccer ball is on the left side of the water dispenser.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The motorcycle is on the left side of the street." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_57960.jpg", "item_id": "sa_57960", "options": [ "The motorcycle is on the left side of the street.", "The motorcycle is on the right side of the street." ], "prompt_preset": "seal_zoom", "question": "Question: Is the motorcycle on the left or right side of the street?\nOptions: \nA. The motorcycle is on the left side of the street.\nB. The motorcycle is on the right side of the street.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the motorcycle on the left or right side of the street?\nOptions: \nA. The motorcycle is on the left side of the street.\nB. The motorcycle is on the right side of the street.\n\nOptions:\nA. The motorcycle is on the left side of the street.\nB. The motorcycle is on the right side of the street.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The yellow car is on the left side of the pool." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_65606.jpg", "item_id": "sa_65606", "options": [ "The yellow car is on the left side of the pool.", "The yellow car is on the right side of the pool." ], "prompt_preset": "seal_zoom", "question": "Question: Is the yellow car on the left or right side of the pool?\nOptions: \nA. The yellow car is on the left side of the pool.\nB. The yellow car is on the right side of the pool.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the yellow car on the left or right side of the pool?\nOptions: \nA. The yellow car is on the left side of the pool.\nB. The yellow car is on the right side of the pool.\n\nOptions:\nA. The yellow car is on the left side of the pool.\nB. The yellow car is on the right side of the pool.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The wheelchair is on the left side of the lamp post." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_23298.jpg", "item_id": "sa_23298", "options": [ "The wheelchair is on the left side of the lamp post.", "The wheelchair is on the right side of the lamp post." ], "prompt_preset": "seal_zoom", "question": "Question: Is the wheelchair on the left or right side of the lamp post?\nOptions: \nA. The wheelchair is on the left side of the lamp post.\nB. The wheelchair is on the right side of the lamp post.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the wheelchair on the left or right side of the lamp post?\nOptions: \nA. The wheelchair is on the left side of the lamp post.\nB. The wheelchair is on the right side of the lamp post.\n\nOptions:\nA. The wheelchair is on the left side of the lamp post.\nB. The wheelchair is on the right side of the lamp post.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The dog is on the right side of the river." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_24031.jpg", "item_id": "sa_24031", "options": [ "The dog is on the right side of the river.", "The dog is on the left side of the river." ], "prompt_preset": "seal_zoom", "question": "Question: Is the dog on the left or right side of the river?\nOptions: \nA. The dog is on the right side of the river.\nB. The dog is on the left side of the river.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the dog on the left or right side of the river?\nOptions: \nA. The dog is on the right side of the river.\nB. The dog is on the left side of the river.\n\nOptions:\nA. The dog is on the right side of the river.\nB. The dog is on the left side of the river.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The cyclist is on the right side of the sculpture." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_41869.jpg", "item_id": "sa_41869", "options": [ "The cyclist is on the right side of the sculpture.", "The cyclist is on the left side of the sculpture." ], "prompt_preset": "seal_zoom", "question": "Question: Is the cyclist on the left or right side of the sculpture?\nOptions: \nA. The cyclist is on the right side of the sculpture.\nB. The cyclist is on the left side of the sculpture.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the cyclist on the left or right side of the sculpture?\nOptions: \nA. The cyclist is on the right side of the sculpture.\nB. The cyclist is on the left side of the sculpture.\n\nOptions:\nA. The cyclist is on the right side of the sculpture.\nB. The cyclist is on the left side of the sculpture.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The switch is on the right side of the towel." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_62287.jpg", "item_id": "sa_62287", "options": [ "The switch is on the right side of the towel.", "The switch is on the left side of the towel." ], "prompt_preset": "seal_zoom", "question": "Question: Is the switch on the left or right side of the towel?\nOptions: \nA. The switch is on the right side of the towel.\nB. The switch is on the left side of the towel.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the switch on the left or right side of the towel?\nOptions: \nA. The switch is on the right side of the towel.\nB. The switch is on the left side of the towel.\n\nOptions:\nA. The switch is on the right side of the towel.\nB. The switch is on the left side of the towel.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The blue bench is on the left side of the green door." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_80084.jpg", "item_id": "sa_80084", "options": [ "The blue bench is on the left side of the green door.", "The blue bench is on the right side of the green door." ], "prompt_preset": "seal_zoom", "question": "Question: Is the blue bench on the left or right side of the green door?\nOptions: \nA. The blue bench is on the left side of the green door.\nB. The blue bench is on the right side of the green door.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the blue bench on the left or right side of the green door?\nOptions: \nA. The blue bench is on the left side of the green door.\nB. The blue bench is on the right side of the green door.\n\nOptions:\nA. The blue bench is on the left side of the green door.\nB. The blue bench is on the right side of the green door.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The suitcase is on the left side of the river." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_36787.jpg", "item_id": "sa_36787", "options": [ "The suitcase is on the left side of the river.", "The suitcase is on the right side of the river." ], "prompt_preset": "seal_zoom", "question": "Question: Is the suitcase on the left or right side of the river?\nOptions: \nA. The suitcase is on the left side of the river.\nB. The suitcase is on the right side of the river.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the suitcase on the left or right side of the river?\nOptions: \nA. The suitcase is on the left side of the river.\nB. The suitcase is on the right side of the river.\n\nOptions:\nA. The suitcase is on the left side of the river.\nB. The suitcase is on the right side of the river.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The shovel is on the right side of the house." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_31784.jpg", "item_id": "sa_31784", "options": [ "The shovel is on the right side of the house.", "The shovel is on the left side of the house." ], "prompt_preset": "seal_zoom", "question": "Question: Is the shovel on the left or right side of the house?\nOptions: \nA. The shovel is on the right side of the house.\nB. The shovel is on the left side of the house.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the shovel on the left or right side of the house?\nOptions: \nA. The shovel is on the right side of the house.\nB. The shovel is on the left side of the house.\n\nOptions:\nA. The shovel is on the right side of the house.\nB. The shovel is on the left side of the house.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The yellow trash can is on the right side of the motorcycle." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_3053.jpg", "item_id": "sa_3053", "options": [ "The yellow trash can is on the right side of the motorcycle.", "The yellow trash can is on the left side of the motorcycle." ], "prompt_preset": "seal_zoom", "question": "Question: Is the yellow trash can on the left or right side of the motorcycle?\nOptions: \nA. The yellow trash can is on the right side of the motorcycle.\nB. The yellow trash can is on the left side of the motorcycle.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the yellow trash can on the left or right side of the motorcycle?\nOptions: \nA. The yellow trash can is on the right side of the motorcycle.\nB. The yellow trash can is on the left side of the motorcycle.\n\nOptions:\nA. The yellow trash can is on the right side of the motorcycle.\nB. The yellow trash can is on the left side of the motorcycle.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The blue luggage is on the right side of the bus." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_83235.jpg", "item_id": "sa_83235", "options": [ "The blue luggage is on the right side of the bus.", "The blue luggage is on the left side of the bus." ], "prompt_preset": "seal_zoom", "question": "Question: Is the blue luggage on the left or right side of the bus?\nOptions: \nA. The blue luggage is on the right side of the bus.\nB. The blue luggage is on the left side of the bus.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the blue luggage on the left or right side of the bus?\nOptions: \nA. The blue luggage is on the right side of the bus.\nB. The blue luggage is on the left side of the bus.\n\nOptions:\nA. The blue luggage is on the right side of the bus.\nB. The blue luggage is on the left side of the bus.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
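Each row pairs a reward_model record (the answer letter plus the full ground-truth sentence) with an extra_info record holding the option texts in order, so the two fields are redundant by construction and can be cross-checked. A small sketch of that check, assuming the rows have already been loaded into Python dicts shaped like the JSON above (check_row is a hypothetical helper, not part of the dataset tooling):

```python
import string

def check_row(reward_model: dict, extra_info: dict) -> bool:
    """Verify that ground_truth is exactly the option named by answer_letter."""
    letter = reward_model["answer_letter"]        # e.g. "A"
    idx = string.ascii_uppercase.index(letter)    # "A" -> 0, "B" -> 1
    expected = f"{letter}. {extra_info['options'][idx]}"
    return reward_model["ground_truth"] == expected

# Values copied from the blue-luggage/bus row above.
reward = {"answer_letter": "A",
          "ground_truth": "A. The blue luggage is on the right side of the bus."}
extra = {"options": ["The blue luggage is on the right side of the bus.",
                     "The blue luggage is on the left side of the bus."]}
assert check_row(reward, extra)
```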
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The wristband is on the left side of the dog." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_7826.jpg", "item_id": "sa_7826", "options": [ "The wristband is on the left side of the dog.", "The wristband is on the right side of the dog." ], "prompt_preset": "seal_zoom", "question": "Question: Is the wristband on the left or right side of the dog?\nOptions: \nA. The wristband is on the left side of the dog.\nB. The wristband is on the right side of the dog.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the wristband on the left or right side of the dog?\nOptions: \nA. The wristband is on the left side of the dog.\nB. The wristband is on the right side of the dog.\n\nOptions:\nA. The wristband is on the left side of the dog.\nB. The wristband is on the right side of the dog.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The telephone booth is on the right side of the yellow van." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_34785.jpg", "item_id": "sa_34785", "options": [ "The telephone booth is on the right side of the yellow van.", "The telephone booth is on the left side of the yellow van." ], "prompt_preset": "seal_zoom", "question": "Question: Is the telephone booth on the left or right side of the yellow van?\nOptions: \nA. The telephone booth is on the right side of the yellow van.\nB. The telephone booth is on the left side of the yellow van.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the telephone booth on the left or right side of the yellow van?\nOptions: \nA. The telephone booth is on the right side of the yellow van.\nB. The telephone booth is on the left side of the yellow van.\n\nOptions:\nA. The telephone booth is on the right side of the yellow van.\nB. The telephone booth is on the left side of the yellow van.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The bucket is on the right side of the plunger." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_56362.jpg", "item_id": "sa_56362", "options": [ "The bucket is on the right side of the plunger.", "The bucket is on the left side of the plunger." ], "prompt_preset": "seal_zoom", "question": "Question: Is the bucket on the left or right side of the plunger?\nOptions: \nA. The bucket is on the right side of the plunger.\nB. The bucket is on the left side of the plunger.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the bucket on the left or right side of the plunger?\nOptions: \nA. The bucket is on the right side of the plunger.\nB. The bucket is on the left side of the plunger.\n\nOptions:\nA. The bucket is on the right side of the plunger.\nB. The bucket is on the left side of the plunger.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The vehicle is on the left side of the blue umbrella." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_40977.jpg", "item_id": "sa_40977", "options": [ "The vehicle is on the left side of the blue umbrella.", "The vehicle is on the right side of the blue umbrella." ], "prompt_preset": "seal_zoom", "question": "Question: Is the vehicle on the left or right side of the blue umbrella?\nOptions: \nA. The vehicle is on the left side of the blue umbrella.\nB. The vehicle is on the right side of the blue umbrella.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the vehicle on the left or right side of the blue umbrella?\nOptions: \nA. The vehicle is on the left side of the blue umbrella.\nB. The vehicle is on the right side of the blue umbrella.\n\nOptions:\nA. The vehicle is on the left side of the blue umbrella.\nB. The vehicle is on the right side of the blue umbrella.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The shopping cart is on the left side of the dog." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_59351.jpg", "item_id": "sa_59351", "options": [ "The shopping cart is on the left side of the dog.", "The shopping cart is on the right side of the dog." ], "prompt_preset": "seal_zoom", "question": "Question: Is the shopping cart on the left or right side of the dog?\nOptions: \nA. The shopping cart is on the left side of the dog.\nB. The shopping cart is on the right side of the dog.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the shopping cart on the left or right side of the dog?\nOptions: \nA. The shopping cart is on the left side of the dog.\nB. The shopping cart is on the right side of the dog.\n\nOptions:\nA. The shopping cart is on the left side of the dog.\nB. The shopping cart is on the right side of the dog.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The man with green shorts is on the left side of the blue boat." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_31038.jpg", "item_id": "sa_31038", "options": [ "The man with green shorts is on the left side of the blue boat.", "The man with green shorts is on the right side of the blue boat." ], "prompt_preset": "seal_zoom", "question": "Question: Is the man with green shorts on the left or right side of the blue boat?\nOptions: \nA. The man with green shorts is on the left side of the blue boat.\nB. The man with green shorts is on the right side of the blue boat.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the man with green shorts on the left or right side of the blue boat?\nOptions: \nA. The man with green shorts is on the left side of the blue boat.\nB. The man with green shorts is on the right side of the blue boat.\n\nOptions:\nA. The man with green shorts is on the left side of the blue boat.\nB. The man with green shorts is on the right side of the blue boat.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The dog is on the left side of the scooter." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_3194.jpg", "item_id": "sa_3194", "options": [ "The dog is on the left side of the scooter.", "The dog is on the right side of the scooter." ], "prompt_preset": "seal_zoom", "question": "Question: Is the dog on the left or right side of the scooter?\nOptions: \nA. The dog is on the left side of the scooter.\nB. The dog is on the right side of the scooter.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the dog on the left or right side of the scooter?\nOptions: \nA. The dog is on the left side of the scooter.\nB. The dog is on the right side of the scooter.\n\nOptions:\nA. The dog is on the left side of the scooter.\nB. The dog is on the right side of the scooter.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The trash can is on the left side of the black chair." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_5156.jpg", "item_id": "sa_5156", "options": [ "The trash can is on the left side of the black chair.", "The trash can is on the right side of the black chair." ], "prompt_preset": "seal_zoom", "question": "Question: Is the trash can on the left or right side of the black chair?\nOptions: \nA. The trash can is on the left side of the black chair.\nB. The trash can is on the right side of the black chair.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the trash can on the left or right side of the black chair?\nOptions: \nA. The trash can is on the left side of the black chair.\nB. The trash can is on the right side of the black chair.\n\nOptions:\nA. The trash can is on the left side of the black chair.\nB. The trash can is on the right side of the black chair.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The motorcyclist is on the left side of the truck." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_64142.jpg", "item_id": "sa_64142", "options": [ "The motorcyclist is on the left side of the truck.", "The motorcyclist is on the right side of the truck." ], "prompt_preset": "seal_zoom", "question": "Question: Is the motorcyclist on the left or right side of the truck?\nOptions: \nA. The motorcyclist is on the left side of the truck.\nB. The motorcyclist is on the right side of the truck.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the motorcyclist on the left or right side of the truck?\nOptions: \nA. The motorcyclist is on the left side of the truck.\nB. The motorcyclist is on the right side of the truck.\n\nOptions:\nA. The motorcyclist is on the left side of the truck.\nB. The motorcyclist is on the right side of the truck.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The orange luggage is on the right side of the purple umbrella." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_5593.jpg", "item_id": "sa_5593", "options": [ "The orange luggage is on the right side of the purple umbrella.", "The orange luggage is on the left side of the purple umbrella." ], "prompt_preset": "seal_zoom", "question": "Question: Is the orange luggage on the left or right side of the purple umbrella?\nOptions: \nA. The orange luggage is on the right side of the purple umbrella.\nB. The orange luggage is on the left side of the purple umbrella.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the orange luggage on the left or right side of the purple umbrella?\nOptions: \nA. The orange luggage is on the right side of the purple umbrella.\nB. The orange luggage is on the left side of the purple umbrella.\n\nOptions:\nA. The orange luggage is on the right side of the purple umbrella.\nB. The orange luggage is on the left side of the purple umbrella.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The trash can is on the right side of the baby carriage." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_61121.jpg", "item_id": "sa_61121", "options": [ "The trash can is on the right side of the baby carriage.", "The trash can is on the left side of the baby carriage." ], "prompt_preset": "seal_zoom", "question": "Question: Is the trash can on the left or right side of the baby carriage?\nOptions: \nA. The trash can is on the right side of the baby carriage.\nB. The trash can is on the left side of the baby carriage.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the trash can on the left or right side of the baby carriage?\nOptions: \nA. The trash can is on the right side of the baby carriage.\nB. The trash can is on the left side of the baby carriage.\n\nOptions:\nA. The trash can is on the right side of the baby carriage.\nB. The trash can is on the left side of the baby carriage.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The blue bottle is on the left side of the carton." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_45802.jpg", "item_id": "sa_45802", "options": [ "The blue bottle is on the left side of the carton.", "The blue bottle is on the right side of the carton." ], "prompt_preset": "seal_zoom", "question": "Question: Is the blue bottle on the left or right side of the carton?\nOptions: \nA. The blue bottle is on the left side of the carton.\nB. The blue bottle is on the right side of the carton.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the blue bottle on the left or right side of the carton?\nOptions: \nA. The blue bottle is on the left side of the carton.\nB. The blue bottle is on the right side of the carton.\n\nOptions:\nA. The blue bottle is on the left side of the carton.\nB. The blue bottle is on the right side of the carton.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The bell is below the clock." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_28639.jpg", "item_id": "sa_28639", "options": [ "The bell is below the clock.", "The bell is above the clock." ], "prompt_preset": "seal_zoom", "question": "Question: Is the bell above or below the clock?\nOptions: \nA. The bell is below the clock.\nB. The bell is above the clock.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the bell above or below the clock?\nOptions: \nA. The bell is below the clock.\nB. The bell is above the clock.\n\nOptions:\nA. The bell is below the clock.\nB. The bell is above the clock.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The scooter is on the right side of the cyclist." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_33280.jpg", "item_id": "sa_33280", "options": [ "The scooter is on the right side of the cyclist.", "The scooter is on the left side of the cyclist." ], "prompt_preset": "seal_zoom", "question": "Question: Is the scooter on the left or right side of the cyclist?\nOptions: \nA. The scooter is on the right side of the cyclist.\nB. The scooter is on the left side of the cyclist.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the scooter on the left or right side of the cyclist?\nOptions: \nA. The scooter is on the right side of the cyclist.\nB. The scooter is on the left side of the cyclist.\n\nOptions:\nA. The scooter is on the right side of the cyclist.\nB. The scooter is on the left side of the cyclist.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The seesaw is on the right side of the red vehicle." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_53509.jpg", "item_id": "sa_53509", "options": [ "The seesaw is on the right side of the red vehicle.", "The seesaw is on the left side of the red vehicle." ], "prompt_preset": "seal_zoom", "question": "Question: Is the seesaw on the left or right side of the red vehicle?\nOptions: \nA. The seesaw is on the right side of the red vehicle.\nB. The seesaw is on the left side of the red vehicle.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the seesaw on the left or right side of the red vehicle?\nOptions: \nA. The seesaw is on the right side of the red vehicle.\nB. The seesaw is on the left side of the red vehicle.\n\nOptions:\nA. The seesaw is on the right side of the red vehicle.\nB. The seesaw is on the left side of the red vehicle.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The drum is on the left side of the yellow balloon." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_7228.jpg", "item_id": "sa_7228", "options": [ "The drum is on the left side of the yellow balloon.", "The drum is on the right side of the yellow balloon." ], "prompt_preset": "seal_zoom", "question": "Question: Is the drum on the left or right side of the yellow balloon?\nOptions: \nA. The drum is on the left side of the yellow balloon.\nB. The drum is on the right side of the yellow balloon.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the drum on the left or right side of the yellow balloon?\nOptions: \nA. The drum is on the left side of the yellow balloon.\nB. The drum is on the right side of the yellow balloon.\n\nOptions:\nA. The drum is on the left side of the yellow balloon.\nB. The drum is on the right side of the yellow balloon.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The blue mask is on the left side of the black mask." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_7621.jpg", "item_id": "sa_7621", "options": [ "The blue mask is on the left side of the black mask.", "The blue mask is on the right side of the black mask." ], "prompt_preset": "seal_zoom", "question": "Question: Is the blue mask on the left or right side of the black mask?\nOptions: \nA. The blue mask is on the left side of the black mask.\nB. The blue mask is on the right side of the black mask.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the blue mask on the left or right side of the black mask?\nOptions: \nA. The blue mask is on the left side of the black mask.\nB. The blue mask is on the right side of the black mask.\n\nOptions:\nA. The blue mask is on the left side of the black mask.\nB. The blue mask is on the right side of the black mask.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The blue truck is on the right side of the white vehicle." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_23264.jpg", "item_id": "sa_23264", "options": [ "The blue truck is on the right side of the white vehicle.", "The blue truck is on the left side of the white vehicle." ], "prompt_preset": "seal_zoom", "question": "Question: Is the blue truck on the left or right side of the white vehicle?\nOptions: \nA. The blue truck is on the right side of the white vehicle.\nB. The blue truck is on the left side of the white vehicle.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the blue truck on the left or right side of the white vehicle?\nOptions: \nA. The blue truck is on the right side of the white vehicle.\nB. The blue truck is on the left side of the white vehicle.\n\nOptions:\nA. The blue truck is on the right side of the white vehicle.\nB. The blue truck is on the left side of the white vehicle.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The bucket is on the right side of the cyclist." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_86732.jpg", "item_id": "sa_86732", "options": [ "The bucket is on the right side of the cyclist.", "The bucket is on the left side of the cyclist." ], "prompt_preset": "seal_zoom", "question": "Question: Is the bucket on the left or right side of the cyclist?\nOptions: \nA. The bucket is on the right side of the cyclist.\nB. The bucket is on the left side of the cyclist.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the bucket on the left or right side of the cyclist?\nOptions: \nA. The bucket is on the right side of the cyclist.\nB. The bucket is on the left side of the cyclist.\n\nOptions:\nA. The bucket is on the right side of the cyclist.\nB. The bucket is on the left side of the cyclist.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The shark doll is above the banana doll." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_52228.jpg", "item_id": "sa_52228", "options": [ "The shark doll is above the banana doll.", "The shark doll is below the banana doll." ], "prompt_preset": "seal_zoom", "question": "Question: Is the shark doll above or below the banana doll?\nOptions: \nA. The shark doll is above the banana doll.\nB. The shark doll is below the banana doll.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the shark doll above or below the banana doll?\nOptions: \nA. The shark doll is above the banana doll.\nB. The shark doll is below the banana doll.\n\nOptions:\nA. The shark doll is above the banana doll.\nB. The shark doll is below the banana doll.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
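The user message also fixes the response format: optional `<think>` and `<tool_call>` blocks followed by a final `<answer>`. The actual reward function of `point_toolbox_v6` is not shown on this page, but the `reward_model` column holds both an option letter and the full option text, which is enough for an exact-match scorer. The following is a plausible sketch over those two fields, nothing more; `score_response` is an assumed name.

```python
import re

ANSWER_RE = re.compile(r"<answer>\s*(.*?)\s*</answer>", re.DOTALL)

def score_response(response: str, reward_model: dict) -> float:
    """Binary exact-match score against the reward_model fields above.

    Accepts either the full option text or a bare letter, since
    ground_truth stores both forms ("A. The shark doll is above ...").
    """
    match = ANSWER_RE.search(response)
    if match is None:
        return 0.0                             # missing <answer> block
    answer = match.group(1).strip()
    if answer == reward_model["ground_truth"]:
        return 1.0
    return 1.0 if answer[:1].upper() == reward_model["answer_letter"] else 0.0
```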
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The person with white trousers is on the left side of the person in blue." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_69634.jpg", "item_id": "sa_69634", "options": [ "The person with white trousers is on the left side of the person in blue.", "The person with white trousers is on the right side of the person in blue." ], "prompt_preset": "seal_zoom", "question": "Question: Is the person with white trousers on the left or right side of the person in blue?\nOptions: \nA. The person with white trousers is on the left side of the person in blue.\nB. The person with white trousers is on the right side of the person in blue.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the person with white trousers on the left or right side of the person in blue?\nOptions: \nA. The person with white trousers is on the left side of the person in blue.\nB. The person with white trousers is on the right side of the person in blue.\n\nOptions:\nA. The person with white trousers is on the left side of the person in blue.\nB. The person with white trousers is on the right side of the person in blue.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The small red car is on the left side of the baby carriage." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_23575.jpg", "item_id": "sa_23575", "options": [ "The small red car is on the left side of the baby carriage.", "The small red car is on the right side of the baby carriage." ], "prompt_preset": "seal_zoom", "question": "Question: Is the small red car on the left or right side of the baby carriage?\nOptions: \nA. The small red car is on the left side of the baby carriage.\nB. The small red car is on the right side of the baby carriage.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the small red car on the left or right side of the baby carriage?\nOptions: \nA. The small red car is on the left side of the baby carriage.\nB. The small red car is on the right side of the baby carriage.\n\nOptions:\nA. The small red car is on the left side of the baby carriage.\nB. The small red car is on the right side of the baby carriage.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The water bottle is closer to the camera." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_3222.jpg", "item_id": "sa_3222", "options": [ "The water bottle is closer to the camera.", "The vehicle is closer to the camera." ], "prompt_preset": "seal_zoom", "question": "Question: Which one is closer to the camera, the water bottle or the vehicle?\nOptions: \nA. The water bottle is closer to the camera.\nB. The vehicle is closer to the camera.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Which one is closer to the camera, the water bottle or the vehicle?\nOptions: \nA. The water bottle is closer to the camera.\nB. The vehicle is closer to the camera.\n\nOptions:\nA. The water bottle is closer to the camera.\nB. The vehicle is closer to the camera.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The red backpack is on the right side of the clock." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_4137.jpg", "item_id": "sa_4137", "options": [ "The red backpack is on the right side of the clock.", "The red backpack is on the left side of the clock." ], "prompt_preset": "seal_zoom", "question": "Question: Is the red backpack on the left or right side of the clock?\nOptions: \nA. The red backpack is on the right side of the clock.\nB. The red backpack is on the left side of the clock.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the red backpack on the left or right side of the clock?\nOptions: \nA. The red backpack is on the right side of the clock.\nB. The red backpack is on the left side of the clock.\n\nOptions:\nA. The red backpack is on the right side of the clock.\nB. The red backpack is on the left side of the clock.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The silver vehicle is on the left side of the red vehicle." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_34291.jpg", "item_id": "sa_34291", "options": [ "The silver vehicle is on the left side of the red vehicle.", "The silver vehicle is on the right side of the red vehicle." ], "prompt_preset": "seal_zoom", "question": "Question: Is the silver vehicle on the left or right side of the red vehicle?\nOptions: \nA. The silver vehicle is on the left side of the red vehicle.\nB. The silver vehicle is on the right side of the red vehicle.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the silver vehicle on the left or right side of the red vehicle?\nOptions: \nA. The silver vehicle is on the left side of the red vehicle.\nB. The silver vehicle is on the right side of the red vehicle.\n\nOptions:\nA. The silver vehicle is on the left side of the red vehicle.\nB. The silver vehicle is on the right side of the red vehicle.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The red balloon is below the white balloon." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_46221.jpg", "item_id": "sa_46221", "options": [ "The red balloon is below the white balloon.", "The red balloon is above the white balloon." ], "prompt_preset": "seal_zoom", "question": "Question: Is the red balloon above or below the white balloon?\nOptions: \nA. The red balloon is below the white balloon.\nB. The red balloon is above the white balloon.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the red balloon above or below the white balloon?\nOptions: \nA. The red balloon is below the white balloon.\nB. The red balloon is above the white balloon.\n\nOptions:\nA. The red balloon is below the white balloon.\nB. The red balloon is above the white balloon.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The umbrella is on the right side of the traffic light." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_10313.jpg", "item_id": "sa_10313", "options": [ "The umbrella is on the right side of the traffic light.", "The umbrella is on the left side of the traffic light." ], "prompt_preset": "seal_zoom", "question": "Question: Is the umbrella on the left or right side of the traffic light?\nOptions: \nA. The umbrella is on the right side of the traffic light.\nB. The umbrella is on the left side of the traffic light.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the umbrella on the left or right side of the traffic light?\nOptions: \nA. The umbrella is on the right side of the traffic light.\nB. The umbrella is on the left side of the traffic light.\n\nOptions:\nA. The umbrella is on the right side of the traffic light.\nB. The umbrella is on the left side of the traffic light.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The motorcycle is on the left side of the dog." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_6183.jpg", "item_id": "sa_6183", "options": [ "The motorcycle is on the left side of the dog.", "The motorcycle is on the right side of the dog." ], "prompt_preset": "seal_zoom", "question": "Question: Is the motorcycle on the left or right side of the dog?\nOptions: \nA. The motorcycle is on the left side of the dog.\nB. The motorcycle is on the right side of the dog.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the motorcycle on the left or right side of the dog?\nOptions: \nA. The motorcycle is on the left side of the dog.\nB. The motorcycle is on the right side of the dog.\n\nOptions:\nA. The motorcycle is on the left side of the dog.\nB. The motorcycle is on the right side of the dog.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The white chair is on the right side of the street." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_78272.jpg", "item_id": "sa_78272", "options": [ "The white chair is on the right side of the street.", "The white chair is on the left side of the street." ], "prompt_preset": "seal_zoom", "question": "Question: Is the white chair on the left or right side of the street?\nOptions: \nA. The white chair is on the right side of the street.\nB. The white chair is on the left side of the street.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the white chair on the left or right side of the street?\nOptions: \nA. The white chair is on the right side of the street.\nB. The white chair is on the left side of the street.\n\nOptions:\nA. The white chair is on the right side of the street.\nB. The white chair is on the left side of the street.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The pillar box is on the right side of the telephone booth." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_62783.jpg", "item_id": "sa_62783", "options": [ "The pillar box is on the right side of the telephone booth.", "The pillar box is on the left side of the telephone booth." ], "prompt_preset": "seal_zoom", "question": "Question: Is the pillar box on the left or right side of the telephone booth?\nOptions: \nA. The pillar box is on the right side of the telephone booth.\nB. The pillar box is on the left side of the telephone booth.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the pillar box on the left or right side of the telephone booth?\nOptions: \nA. The pillar box is on the right side of the telephone booth.\nB. The pillar box is on the left side of the telephone booth.\n\nOptions:\nA. The pillar box is on the right side of the telephone booth.\nB. The pillar box is on the left side of the telephone booth.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The dog is on the right side of the blue bicycle." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_69550.jpg", "item_id": "sa_69550", "options": [ "The dog is on the right side of the blue bicycle.", "The dog is on the left side of the blue bicycle." ], "prompt_preset": "seal_zoom", "question": "Question: Is the dog on the left or right side of the blue bicycle?\nOptions: \nA. The dog is on the right side of the blue bicycle.\nB. The dog is on the left side of the blue bicycle.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the dog on the left or right side of the blue bicycle?\nOptions: \nA. The dog is on the right side of the blue bicycle.\nB. The dog is on the left side of the blue bicycle.\n\nOptions:\nA. The dog is on the right side of the blue bicycle.\nB. The dog is on the left side of the blue bicycle.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The keyboard is on the left side of the guitar." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_61594.jpg", "item_id": "sa_61594", "options": [ "The keyboard is on the left side of the guitar.", "The keyboard is on the right side of the guitar." ], "prompt_preset": "seal_zoom", "question": "Question: Is the keyboard on the left or right side of the guitar?\nOptions: \nA. The keyboard is on the left side of the guitar.\nB. The keyboard is on the right side of the guitar.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the keyboard on the left or right side of the guitar?\nOptions: \nA. The keyboard is on the left side of the guitar.\nB. The keyboard is on the right side of the guitar.\n\nOptions:\nA. The keyboard is on the left side of the guitar.\nB. The keyboard is on the right side of the guitar.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The slide is on the right side of the life buoy." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_77731.jpg", "item_id": "sa_77731", "options": [ "The slide is on the right side of the life buoy.", "The slide is on the left side of the life buoy." ], "prompt_preset": "seal_zoom", "question": "Question: Is the slide on the left or right side of the life buoy?\nOptions: \nA. The slide is on the right side of the life buoy.\nB. The slide is on the left side of the life buoy.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the slide on the left or right side of the life buoy?\nOptions: \nA. The slide is on the right side of the life buoy.\nB. The slide is on the left side of the life buoy.\n\nOptions:\nA. The slide is on the right side of the life buoy.\nB. The slide is on the left side of the life buoy.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The woman with a backpack is on the left side of the person with a beanie." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_24464.jpg", "item_id": "sa_24464", "options": [ "The woman with a backpack is on the left side of the person with a beanie.", "The woman with a backpack is on the right side of the person with a beanie." ], "prompt_preset": "seal_zoom", "question": "Question: Is the woman with a backpack on the left or right side of the person with a beanie?\nOptions: \nA. The woman with a backpack is on the left side of the person with a beanie.\nB. The woman with a backpack is on the right side of the person with a beanie.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the woman with a backpack on the left or right side of the person with a beanie?\nOptions: \nA. The woman with a backpack is on the left side of the person with a beanie.\nB. The woman with a backpack is on the right side of the person with a beanie.\n\nOptions:\nA. The woman with a backpack is on the left side of the person with a beanie.\nB. The woman with a backpack is on the right side of the person with a beanie.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The green bucket is on the left side of the red tent." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_87051.jpg", "item_id": "sa_87051", "options": [ "The green bucket is on the left side of the red tent.", "The green bucket is on the right side of the red tent." ], "prompt_preset": "seal_zoom", "question": "Question: Is the green bucket on the left or right side of the red tent?\nOptions: \nA. The green bucket is on the left side of the red tent.\nB. The green bucket is on the right side of the red tent.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the green bucket on the left or right side of the red tent?\nOptions: \nA. The green bucket is on the left side of the red tent.\nB. The green bucket is on the right side of the red tent.\n\nOptions:\nA. The green bucket is on the left side of the red tent.\nB. The green bucket is on the right side of the red tent.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The dog is on the right side of the train." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_25747.jpg", "item_id": "sa_25747", "options": [ "The dog is on the right side of the train.", "The dog is on the left side of the train." ], "prompt_preset": "seal_zoom", "question": "Question: Is the dog on the left or right side of the train?\nOptions: \nA. The dog is on the right side of the train.\nB. The dog is on the left side of the train.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the dog on the left or right side of the train?\nOptions: \nA. The dog is on the right side of the train.\nB. The dog is on the left side of the train.\n\nOptions:\nA. The dog is on the right side of the train.\nB. The dog is on the left side of the train.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The black vehicle is closer to the camera." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_6531.jpg", "item_id": "sa_6531", "options": [ "The black vehicle is closer to the camera.", "The silver vehicle is closer to the camera." ], "prompt_preset": "seal_zoom", "question": "Question: Which one is closer to the camera, the black vehicle or the silver vehicle?\nOptions: \nA. The black vehicle is closer to the camera.\nB. The silver vehicle is closer to the camera.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Which one is closer to the camera, the black vehicle or the silver vehicle?\nOptions: \nA. The black vehicle is closer to the camera.\nB. The silver vehicle is closer to the camera.\n\nOptions:\nA. The black vehicle is closer to the camera.\nB. The silver vehicle is closer to the camera.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The green surfboard is on the left side of the purple umbrella." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_29022.jpg", "item_id": "sa_29022", "options": [ "The green surfboard is on the left side of the purple umbrella.", "The green surfboard is on the right side of the purple umbrella." ], "prompt_preset": "seal_zoom", "question": "Question: Is the green surfboard on the left or right side of the purple umbrella?\nOptions: \nA. The green surfboard is on the left side of the purple umbrella.\nB. The green surfboard is on the right side of the purple umbrella.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the green surfboard on the left or right side of the purple umbrella?\nOptions: \nA. The green surfboard is on the left side of the purple umbrella.\nB. The green surfboard is on the right side of the purple umbrella.\n\nOptions:\nA. The green surfboard is on the left side of the purple umbrella.\nB. The green surfboard is on the right side of the purple umbrella.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The baby carriage is on the right side of the cyclist." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_64257.jpg", "item_id": "sa_64257", "options": [ "The baby carriage is on the right side of the cyclist.", "The baby carriage is on the left side of the cyclist." ], "prompt_preset": "seal_zoom", "question": "Question: Is the baby carriage on the left or right side of the cyclist?\nOptions: \nA. The baby carriage is on the right side of the cyclist.\nB. The baby carriage is on the left side of the cyclist.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the baby carriage on the left or right side of the cyclist?\nOptions: \nA. The baby carriage is on the right side of the cyclist.\nB. The baby carriage is on the left side of the cyclist.\n\nOptions:\nA. The baby carriage is on the right side of the cyclist.\nB. The baby carriage is on the left side of the cyclist.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: visual_reasoning
env_name: point_toolbox_v6
data_source: vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The sky wheel is on the right side of the person on the horse." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_7626.jpg", "item_id": "sa_7626", "options": [ "The sky wheel is on the right side of the person on the horse.", "The sky wheel is on the left side of the person on the horse." ], "prompt_preset": "seal_zoom", "question": "Question: Is the sky wheel on the left or right side of the person on the horse?\nOptions: \nA. The sky wheel is on the right side of the person on the horse.\nB. The sky wheel is on the left side of the person on the horse.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the sky wheel on the left or right side of the person on the horse?\nOptions: \nA. The sky wheel is on the right side of the person on the horse.\nB. The sky wheel is on the left side of the person on the horse.\n\nOptions:\nA. The sky wheel is on the right side of the person on the horse.\nB. The sky wheel is on the left side of the person on the horse.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The street lamp is on the left side of the blue van." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_42676.jpg", "item_id": "sa_42676", "options": [ "The street lamp is on the left side of the blue van.", "The street lamp is on the right side of the blue van." ], "prompt_preset": "seal_zoom", "question": "Question: Is the street lamp on the left or right side of the blue van?\nOptions: \nA. The street lamp is on the left side of the blue van.\nB. The street lamp is on the right side of the blue van.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the street lamp on the left or right side of the blue van?\nOptions: \nA. The street lamp is on the left side of the blue van.\nB. The street lamp is on the right side of the blue van.\n\nOptions:\nA. The street lamp is on the left side of the blue van.\nB. The street lamp is on the right side of the blue van.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
vstar_relative_position
{ "answer_letter": "A", "ground_truth": "A. The red car is on the left side of the police car." }
{ "bbox": [], "image_path": "/scratch/doqihu/work/eval_data/vstar_bench/relative_position/sa_19421.jpg", "item_id": "sa_19421", "options": [ "The red car is on the left side of the police car.", "The red car is on the right side of the police car." ], "prompt_preset": "seal_zoom", "question": "Question: Is the red car on the left or right side of the police car?\nOptions: \nA. The red car is on the left side of the police car.\nB. The red car is on the right side of the police car.\n", "target_object": [], "test_type": "relative_position" }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"},\"target_image\":{\"type\":\"integer\",\"description\":\"Specifies which image to operate on. Negative values (-1, -2) refer to the last N processed images. Positive values refer to images in sequence: 0=original image, 1=after first tool call, etc. Default is -1 (last processed image).\",\"default\":-1}},\"required\":[\"bbox_2d\"]}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**:\n<tool_call>\n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [50, 100, 200, 300], \"label\": \"the object in the center\", \"target_image\": 0}}\n</tool_call>", "role": "system" }, { "content": "<image>\nQuestion: Is the red car on the left or right side of the police car?\nOptions: \nA. The red car is on the left side of the police car.\nB. The red car is on the right side of the police car.\n\nOptions:\nA. The red car is on the left side of the police car.\nB. The red car is on the right side of the police car.\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
visual_reasoning
point_toolbox_v6
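The rows above follow a consistent schema: a `data_source` string, a `reward_model` dict holding the answer letter and ground-truth string, an `extra_info` dict with the image path and question metadata, a `prompt` chat transcript (a system message defining `image_zoom_in_tool`, then the user turn with the question), plus `ability` and `env_name` strings. The following is a minimal sketch of loading and inspecting one row. It assumes the Parquet shards have already been downloaded locally; the actual repository id is not visible in this preview, so the `data_files` glob is a placeholder, and the field names are taken directly from the rows shown above.

```python
# Minimal sketch: load the Parquet shards and inspect one record.
# Assumptions (not confirmed by the preview): shards are local and
# match the glob "train-*.parquet"; adjust paths for your setup.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="train-*.parquet", split="train")

row = ds[0]
print(row["data_source"])                    # e.g. "vstar_relative_position"
print(row["ability"], row["env_name"])       # e.g. "visual_reasoning", "point_toolbox_v6"
print(row["reward_model"]["answer_letter"])  # the correct option letter, e.g. "A"
print(row["extra_info"]["question"])         # the multiple-choice question text

# The prompt is a list of chat messages: a system turn that defines
# image_zoom_in_tool, then the user turn carrying the question.
for message in row["prompt"]:
    print(message["role"], "->", message["content"][:80])
```

A natural use of this layout is answer checking: after a model emits its final choice, the single option letter can be compared against `row["reward_model"]["answer_letter"]` to score the response.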