| text (stringlengths 558–4.54k) | prefix (stringlengths 100–2k) | middle (stringlengths 10–500) | suffix (stringlengths 100–2k) | type (stringclasses, 2 values) |
|---|---|---|---|---|
<|fim_prefix|> }
_draw_track(E.key, E.value);
for (int i = 0; i < animation->track_get_key_count(E.key); i++) {
float offset = animation->track_get_key_time(E.key, i);
float value = animation->bezier_track_get_key_value(E.key, i);
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (pos.x >= limit && pos.x <= right_limit) {
draw_texture(point, pos - point->get_size() / 2.0, E.value);
}
}
}
if (track_count > 0 && !hidden_tracks.has(selected_track)) {
// Draw edited curve.
_draw_track(selected_track, selected_track_color);
}
}
const bool draw_selection_handles = selection.size() > 1;
LocalVector<Point2> selected_pos;
// Draw editor handles.
{
edit_points.clear();
float scale = timeline->get_zoom_scale();
for (int i = 0; i < track_count; ++i) {
bool draw_track = _is_track_curves_displayed(i) && !locked_tracks.has(i);
if (!draw_selection_handles && !draw_track) {
continue;
}
int key_count = animation->track_get_key_count(i);
for (int j = 0; j < key_count; ++j) {
float offset = animation->track_get_key_time(i, j);
float value = animation->bezier_track_get_key_value(i, j);
bool is_selected = selection.has(IntPair(i, j));
if (is_selected) {
if (moving_selection) {
offset += moving_selection_offset.x;
value += moving_selection_offset.y;
} else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
value += -scaling_selection_offset.y + (value - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (draw_selection_handles && is_selected) {
selected_pos.push_back(pos);
}
if (!draw_track) {
continue;<|fim_suffix|> Vector2 out_vec = animation->bezier_track_get_key_out_handle(i, j);
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
in_vec = moving_handle_left;
}
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
out_vec = moving_handle_right;
}
if (moving_inserted_key && moving_selection_from_key == j) {
Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(i, j);
if (handle_mode != Animation::HANDLE_MODE_FREE) {
int key_prev = 0;
int key_next = moving_selection_from_key;
for (int k = 0; k < key_count; k++) {
if (k == moving_selection_from_key) {
continue;
}
if (animation->track_get_key_time(i, k) < offset) {
key_prev = k;
} else {
key_next = k;
break;
}
}
float prev_time = offset;
float prev_value = value;
if (key_prev != moving_selection_from_key) {
prev_time = animation->track_get_key_time(i, key_prev);
prev_value = animation->bezier_track_get_key_value(i, key_prev);
}
float next_time = offset;
float next_value = value;
if (key_next != moving_selection_from_key) {
next_time = animation->track_get_key_time(i, key_next);
next_value = animation->bezier_track_get_key_value(i, key_next);
}
animation->bezier_track_calculate_handles(offset, prev_time, prev_value, next_time, next_value, handle_mode, Animation::HANDLE_SET_MODE_AUTO, &in_vec, &out_vec);
}
}
Vector2 pos_in(((offset + in_vec.x) - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value + in_vec.y));
Vector2 pos_out(((offset + out_vec.x) - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value + out_vec.y));
<|fim_middle|> }
Vector2 in_vec = animation->bezier_track_get_key_in_handle(i, j);
|
}
_draw_track(E.key, E.value);
for (int i = 0; i < animation->track_get_key_count(E.key); i++) {
float offset = animation->track_get_key_time(E.key, i);
float value = animation->bezier_track_get_key_value(E.key, i);
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (pos.x >= limit && pos.x <= right_limit) {
draw_texture(point, pos - point->get_size() / 2.0, E.value);
}
}
}
if (track_count > 0 && !hidden_tracks.has(selected_track)) {
// Draw edited curve.
_draw_track(selected_track, selected_track_color);
}
}
const bool draw_selection_handles = selection.size() > 1;
LocalVector<Point2> selected_pos;
// Draw editor handles.
{
edit_points.clear();
float scale = timeline->get_zoom_scale();
for (int i = 0; i < track_count; ++i) {
bool draw_track = _is_track_curves_displayed(i) && !locked_tracks.has(i);
if (!draw_selection_handles && !draw_track) {
continue;
}
int key_count = animation->track_get_key_count(i);
for (int j = 0; j < key_count; ++j) {
float offset = animation->track_get_key_time(i, j);
float value = animation->bezier_track_get_key_value(i, j);
bool is_selected = selection.has(IntPair(i, j));
if (is_selected) {
if (moving_selection) {
offset += moving_selection_offset.x;
value += moving_selection_offset.y;
} else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
value += -scaling_selection_offset.y + (value - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (draw_selection_handles && is_selected) {
selected_pos.push_back(pos);
}
if (!draw_track) {
continue;
|
}
Vector2 in_vec = animation->bezier_track_get_key_in_handle(i, j);
|
Vector2 out_vec = animation->bezier_track_get_key_out_handle(i, j);
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
in_vec = moving_handle_left;
}
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
out_vec = moving_handle_right;
}
if (moving_inserted_key && moving_selection_from_key == j) {
Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(i, j);
if (handle_mode != Animation::HANDLE_MODE_FREE) {
int key_prev = 0;
int key_next = moving_selection_from_key;
for (int k = 0; k < key_count; k++) {
if (k == moving_selection_from_key) {
continue;
}
if (animation->track_get_key_time(i, k) < offset) {
key_prev = k;
} else {
key_next = k;
break;
}
}
float prev_time = offset;
float prev_value = value;
if (key_prev != moving_selection_from_key) {
prev_time = animation->track_get_key_time(i, key_prev);
prev_value = animation->bezier_track_get_key_value(i, key_prev);
}
float next_time = offset;
float next_value = value;
if (key_next != moving_selection_from_key) {
next_time = animation->track_get_key_time(i, key_next);
next_value = animation->bezier_track_get_key_value(i, key_next);
}
animation->bezier_track_calculate_handles(offset, prev_time, prev_value, next_time, next_value, handle_mode, Animation::HANDLE_SET_MODE_AUTO, &in_vec, &out_vec);
}
}
Vector2 pos_in(((offset + in_vec.x) - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value + in_vec.y));
Vector2 pos_out(((offset + out_vec.x) - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value + out_vec.y));
|
random
|
<|fim_prefix|> lc = h_line_color;
lc.a *= 0.5;
draw_line(Point2(limit, i), Point2(right_limit, i), lc, Math::round(EDSCALE));
Color c = color;
c.a *= 0.5;
draw_string(font, Point2(limit + 8, i - 2), TS->format_number(rtos(Math::snapped((iv + 1) * scale, step))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, c);
}
first = false;
prev_iv = iv;
}
}
// Draw other curves.
{
float scale = timeline->get_zoom_scale();
Ref<Texture2D> point = get_editor_theme_icon(SNAME("KeyValue"));
for (const KeyValue<int, Color> &E : subtrack_colors) {
if (hidden_tracks.has(E.key)) {
continue;
}
_draw_track(E.key, E.value);
for (int i = 0; i < animation->track_get_key_count(E.key); i++) {
float offset = animation->track_get_key_time(E.key, i);
float value = animation->bezier_track_get_key_value(E.key, i);
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (pos.x >= limit && pos.x <= right_limit) {
draw_texture(point, pos - point->get_size() / 2.0, E.value);
}
}
}
if (track_count > 0 && !hidden_tracks.has(selected_track)) {
// Draw edited curve.
_draw_track(selected_track, selected_track_color);
}
}
const bool draw_selection_handles = selection.size() > 1;
LocalVector<Point2> selected_pos;
// Draw editor handles.
{
edit_points.clear();
float scale = timeline->get_zoom_scale();
for (int i = 0; i < track_count; ++i) {
bool draw_track = _is_track_curves_displayed(i) && !locked_tracks.has(i);
if (!draw_selection_handles && !draw_track) {
continue;
}
int key_count = animation->track_get_key_count(i);
for (int j = 0; j < key_count; ++j) {
float offset = animation->track_get_key_time(i, j);
float value = animation->bezier_track_get_key_value(i, j);
bool is_selected = selection.has(IntPair(i, j));
if (is_selected) {
if <|fim_suffix|> else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
value += -scaling_selection_offset.y + (value - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (draw_selection_handles && is_selected) {
selected_pos.push_back(pos);
}
if (!draw_track) {
continue;
}
Vector2 in_vec = animation->bezier_track_get_key_in_handle(i, j);
Vector2 out_vec = animation->bezier_track_get_key_out_handle(i, j);
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
in_vec = moving_handle_left;
}
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
out_vec = moving_handle_right;
}
if (moving_inserted_key && moving_selection_from_key == j) {
Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(i, j);
if (handle_mode != Animation::HANDLE_MODE_FREE) {
int key_prev = 0;
int key_next = moving_selection_from_key;
for (int k = 0; k < key_count; k++) {
if (k == moving_selection_from_key) {
continue;
}
if (animation->track_get_key_time(i, k) < offset) {
key_prev = k;
} else {
key_next = k;
break;
}
}
float prev_time = offset;
float prev_value = value;
if (key_prev != moving_selection_from_key) {
prev_time = animation->track_get_key_time(i, key_prev);
prev_value = animation->bezier_track_get_key_value(i, key_prev);
}
float next_time = offset;
float next_value = value;
if (key_next != moving_selection_from_key) {
next_time = animation-<|fim_middle|>(moving_selection) {
offset += moving_selection_offset.x;
value += moving_selection_offset.y;
}
|
lc = h_line_color;
lc.a *= 0.5;
draw_line(Point2(limit, i), Point2(right_limit, i), lc, Math::round(EDSCALE));
Color c = color;
c.a *= 0.5;
draw_string(font, Point2(limit + 8, i - 2), TS->format_number(rtos(Math::snapped((iv + 1) * scale, step))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, c);
}
first = false;
prev_iv = iv;
}
}
// Draw other curves.
{
float scale = timeline->get_zoom_scale();
Ref<Texture2D> point = get_editor_theme_icon(SNAME("KeyValue"));
for (const KeyValue<int, Color> &E : subtrack_colors) {
if (hidden_tracks.has(E.key)) {
continue;
}
_draw_track(E.key, E.value);
for (int i = 0; i < animation->track_get_key_count(E.key); i++) {
float offset = animation->track_get_key_time(E.key, i);
float value = animation->bezier_track_get_key_value(E.key, i);
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (pos.x >= limit && pos.x <= right_limit) {
draw_texture(point, pos - point->get_size() / 2.0, E.value);
}
}
}
if (track_count > 0 && !hidden_tracks.has(selected_track)) {
// Draw edited curve.
_draw_track(selected_track, selected_track_color);
}
}
const bool draw_selection_handles = selection.size() > 1;
LocalVector<Point2> selected_pos;
// Draw editor handles.
{
edit_points.clear();
float scale = timeline->get_zoom_scale();
for (int i = 0; i < track_count; ++i) {
bool draw_track = _is_track_curves_displayed(i) && !locked_tracks.has(i);
if (!draw_selection_handles && !draw_track) {
continue;
}
int key_count = animation->track_get_key_count(i);
for (int j = 0; j < key_count; ++j) {
float offset = animation->track_get_key_time(i, j);
float value = animation->bezier_track_get_key_value(i, j);
bool is_selected = selection.has(IntPair(i, j));
if (is_selected) {
if
|
(moving_selection) {
offset += moving_selection_offset.x;
value += moving_selection_offset.y;
}
|
else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
value += -scaling_selection_offset.y + (value - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (draw_selection_handles && is_selected) {
selected_pos.push_back(pos);
}
if (!draw_track) {
continue;
}
Vector2 in_vec = animation->bezier_track_get_key_in_handle(i, j);
Vector2 out_vec = animation->bezier_track_get_key_out_handle(i, j);
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
in_vec = moving_handle_left;
}
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
out_vec = moving_handle_right;
}
if (moving_inserted_key && moving_selection_from_key == j) {
Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(i, j);
if (handle_mode != Animation::HANDLE_MODE_FREE) {
int key_prev = 0;
int key_next = moving_selection_from_key;
for (int k = 0; k < key_count; k++) {
if (k == moving_selection_from_key) {
continue;
}
if (animation->track_get_key_time(i, k) < offset) {
key_prev = k;
} else {
key_next = k;
break;
}
}
float prev_time = offset;
float prev_value = value;
if (key_prev != moving_selection_from_key) {
prev_time = animation->track_get_key_time(i, key_prev);
prev_value = animation->bezier_track_get_key_value(i, key_prev);
}
float next_time = offset;
float next_value = value;
if (key_next != moving_selection_from_key) {
next_time = animation-
|
ast_based
|
<|fim_prefix|>// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "calibController.hpp"
#include <algorithm>
#include <cmath>
#include <ctime>
#include <opencv2/3d.hpp><|fim_suffix|> int yGridStep = mCalibData->imageSize.height / gridSize;
std::vector<int> pointsInCell(gridSize*gridSize);
std::fill(pointsInCell.begin(), pointsInCell.end(), 0);
for(std::vector<std::vector<cv::Point2f> >::iterator it = mCalibData->imagePoints.begin(); it != mCalibData->imagePoints.end(); ++it)
for(std::vector<cv::Point2f>::iterator pointIt = (*it).begin(); pointIt != (*it).end(); ++pointIt) {
int i = (int)((*pointIt).x / xGridStep);
int j = (int)((*pointIt).y / yGridStep);
pointsInCell[i*gridSize + j]++;
}
for(std::vector<cv::Mat>::iterator it = mCalibData->allCharucoCorners.begin(); it != mCalibData->allCharucoCorners.end(); ++it)
for(int l = 0; l < (*it).size[0]; l++) {
int i = (int)((*it).at<float>(l, 0) / xGridStep);
int j = (int)((*it).at<float>(l, 1) / yGridStep);
pointsInCell[i*gridSize + j]++;
}
cv::Mat mean, stdDev;
cv::meanStdDev(pointsInCell, mean, stdDev);
return mean.at<double>(0) / (stdDev.at<double>(0) + 1e-7);
}
calib::calibController::calibController()
{
mCalibFlags = 0;
}
calib::calibController::calibController(cv::Ptr<calib::calibrationData> data, int initialFlags, bool autoTuning, int minFramesNum) :
mCalibData(data)
{
mCalibFlags = initialFlags;
mNeedTuning = autoTuning;
mMinFramesNum = minFramesNum;
mConfIntervalsState = false;
mCoverageQualityState = false;
}
void calib::calibController::updateState()
{
if(mCalibData->cameraMatrix.total()) {
const double relErrEps = 0.05;
bool fConfState = false, cConfState = false, dConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(0) / mCalibData->cameraMatrix.at<double>(0,0) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(1) / mCalibData->cameraMatrix.at<double>(1,1) < relErrEps)
fConfState = true;<|fim_middle|>#include <opencv2/calib.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace cv;
double calib::calibController::estimateCoverageQuality()
{
int gridSize = 10;
int xGridStep = mCalibData->imageSize.width / gridSize;
|
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "calibController.hpp"
#include <algorithm>
#include <cmath>
#include <ctime>
#include <opencv2/3d.hpp>
|
#include <opencv2/calib.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace cv;
double calib::calibController::estimateCoverageQuality()
{
int gridSize = 10;
int xGridStep = mCalibData->imageSize.width / gridSize;
|
int yGridStep = mCalibData->imageSize.height / gridSize;
std::vector<int> pointsInCell(gridSize*gridSize);
std::fill(pointsInCell.begin(), pointsInCell.end(), 0);
for(std::vector<std::vector<cv::Point2f> >::iterator it = mCalibData->imagePoints.begin(); it != mCalibData->imagePoints.end(); ++it)
for(std::vector<cv::Point2f>::iterator pointIt = (*it).begin(); pointIt != (*it).end(); ++pointIt) {
int i = (int)((*pointIt).x / xGridStep);
int j = (int)((*pointIt).y / yGridStep);
pointsInCell[i*gridSize + j]++;
}
for(std::vector<cv::Mat>::iterator it = mCalibData->allCharucoCorners.begin(); it != mCalibData->allCharucoCorners.end(); ++it)
for(int l = 0; l < (*it).size[0]; l++) {
int i = (int)((*it).at<float>(l, 0) / xGridStep);
int j = (int)((*it).at<float>(l, 1) / yGridStep);
pointsInCell[i*gridSize + j]++;
}
cv::Mat mean, stdDev;
cv::meanStdDev(pointsInCell, mean, stdDev);
return mean.at<double>(0) / (stdDev.at<double>(0) + 1e-7);
}
calib::calibController::calibController()
{
mCalibFlags = 0;
}
calib::calibController::calibController(cv::Ptr<calib::calibrationData> data, int initialFlags, bool autoTuning, int minFramesNum) :
mCalibData(data)
{
mCalibFlags = initialFlags;
mNeedTuning = autoTuning;
mMinFramesNum = minFramesNum;
mConfIntervalsState = false;
mCoverageQualityState = false;
}
void calib::calibController::updateState()
{
if(mCalibData->cameraMatrix.total()) {
const double relErrEps = 0.05;
bool fConfState = false, cConfState = false, dConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(0) / mCalibData->cameraMatrix.at<double>(0,0) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(1) / mCalibData->cameraMatrix.at<double>(1,1) < relErrEps)
fConfState = true;
|
random
|
<|fim_prefix|>Sprite2D::get_frame_progress() const {
return frame_progress;
}
void AnimatedSprite2D::set_frame_and_progress(int p_frame, real_t p_progress) {
if (frames.is_null()) {
return;
}
bool has_animation = frames->has_animation(animation);
int end_frame = has_animation ? MAX(0, frames->get_frame_count(animation) - 1) : 0;
bool is_changed = frame != p_frame;
if (p_frame < 0) {
frame = 0;
} else if (has_animation && p_frame > end_frame) {
frame = end_frame;
} else {
frame = p_frame;
}
_calc_frame_speed_scale();
frame_progress = p_progress;
if (!is_changed) {
return; // No change, don't redraw.
}
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
void AnimatedSprite2D::set_speed_scale(float p_speed_scale) {
speed_scale = p_speed_scale;
}
float AnimatedSprite2D::get_speed_scale() const {
return speed_scale;
}
float AnimatedSprite2D::get_playing_speed() const {
if (!playing) {
return 0;
}
return speed_scale * custom_speed_scale;
}
void AnimatedSprite2D::set_centered(bool p_center) {
if (centered == p_center) {
return;
}
centered = p_center;
queue_redraw();
item_rect_changed();
}
bool AnimatedSprite2D::is_centered() const {
return centered;
}
void AnimatedSprite2D::set_offset(const Point2 &p_offset) {
if (offset == p_offset) {
return;
}
offset = p_offset;
queue_redraw();
item_rect_changed();
}
Point2 AnimatedSprite2D::get_offset() const {
return offset;
}
void AnimatedSprite2D::set_flip_h(bool p_flip) {
if (hflip == p_flip) {
return;
}
hflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_h() const {
return hflip;
}
void AnimatedSprite2D::set_flip_v(bool p_flip) {
if (vflip == p_flip) {
return;
}
vflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_v() const {
return vflip;
}
void AnimatedSprite2D::_res_changed() {
set_frame_and_progress(frame, frame_progress);
queue_redraw();
notify_property_list_changed();
}
bool AnimatedSprite2D::is_playing() const <|fim_suffix|>
void AnimatedSprite2D::set_autoplay(const String &p_name) {
if (is_inside_tree() && !Engine::get_singleton()->is_editor_hint()) {
WARN_PRINT("Setting autoplay after the node has been added to the scene has no effect.");
}
autoplay = p_name;
}
String AnimatedSprite2D::get_autoplay() const {
return autoplay;
}
void AnimatedSprite2D::play(const StringName &p_name, float p_custom_scale, bool p_from_end) {
StringName name = p_name;
if (name == StringName()) {
name = animation;
}
ERR_FAIL_COND_MSG(frames.is_null(), vformat("There is no animation with name '%s'.", name));
ERR_FAIL_COND_MSG(!frames->get_animation_names().has(name), vformat("There is no animation with name '%s'.", name));
if (frames->get_frame_count(name) == 0) {
return;
}
playing = true;
custom_speed_scale = p_custom_scale;
if (name != animation) {
animation = name;
int end_frame = MAX(0, frames->get_frame_count(animation) - 1);
if (p_from_end) {
set_frame_and_progress(end_frame, 1.0);
} else {
set_frame_and_progress(0, 0.0);
}
emit_signal(SceneStringName(animation_changed));
} else {
int end_frame = MAX(0, frames->get_frame_count(animation) - 1);
bool is_backward = std::signbit(speed_scale * custom_speed_scale);
if (p_from_end && is_backward && frame == 0 && frame_progress <= 0.0) {
set_frame_and_progress(end_frame, 1.0);
} else if (!p_from_end && !is_backward && frame == end_frame && frame_progress >= 1.0) {
set_frame_and_progress(0, 0.0);
}
}
set_process_internal(true);
notify_property_list_changed();
queue_redraw();
}
void AnimatedSprite2D::play_backwards(const StringName &p_name) {
play(p_name, -1, true);
}
void AnimatedSprite2D::_stop_internal(bool p_reset) {
playing = false;
if (p_reset) {
custom_speed_scale = 1.0;
set_frame_and_progress(0, 0.0);
}
notify_property_list_changed();
set_process_internal(false);
}
void AnimatedSprite2D::pause() {
_stop_internal(false);
}
void AnimatedSprite2D::stop() {
_stop_internal(<|fim_middle|>{
return playing;
}
|
Sprite2D::get_frame_progress() const {
return frame_progress;
}
void AnimatedSprite2D::set_frame_and_progress(int p_frame, real_t p_progress) {
if (frames.is_null()) {
return;
}
bool has_animation = frames->has_animation(animation);
int end_frame = has_animation ? MAX(0, frames->get_frame_count(animation) - 1) : 0;
bool is_changed = frame != p_frame;
if (p_frame < 0) {
frame = 0;
} else if (has_animation && p_frame > end_frame) {
frame = end_frame;
} else {
frame = p_frame;
}
_calc_frame_speed_scale();
frame_progress = p_progress;
if (!is_changed) {
return; // No change, don't redraw.
}
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
void AnimatedSprite2D::set_speed_scale(float p_speed_scale) {
speed_scale = p_speed_scale;
}
float AnimatedSprite2D::get_speed_scale() const {
return speed_scale;
}
float AnimatedSprite2D::get_playing_speed() const {
if (!playing) {
return 0;
}
return speed_scale * custom_speed_scale;
}
void AnimatedSprite2D::set_centered(bool p_center) {
if (centered == p_center) {
return;
}
centered = p_center;
queue_redraw();
item_rect_changed();
}
bool AnimatedSprite2D::is_centered() const {
return centered;
}
void AnimatedSprite2D::set_offset(const Point2 &p_offset) {
if (offset == p_offset) {
return;
}
offset = p_offset;
queue_redraw();
item_rect_changed();
}
Point2 AnimatedSprite2D::get_offset() const {
return offset;
}
void AnimatedSprite2D::set_flip_h(bool p_flip) {
if (hflip == p_flip) {
return;
}
hflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_h() const {
return hflip;
}
void AnimatedSprite2D::set_flip_v(bool p_flip) {
if (vflip == p_flip) {
return;
}
vflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_v() const {
return vflip;
}
void AnimatedSprite2D::_res_changed() {
set_frame_and_progress(frame, frame_progress);
queue_redraw();
notify_property_list_changed();
}
bool AnimatedSprite2D::is_playing() const
|
{
return playing;
}
|
void AnimatedSprite2D::set_autoplay(const String &p_name) {
if (is_inside_tree() && !Engine::get_singleton()->is_editor_hint()) {
WARN_PRINT("Setting autoplay after the node has been added to the scene has no effect.");
}
autoplay = p_name;
}
String AnimatedSprite2D::get_autoplay() const {
return autoplay;
}
void AnimatedSprite2D::play(const StringName &p_name, float p_custom_scale, bool p_from_end) {
StringName name = p_name;
if (name == StringName()) {
name = animation;
}
ERR_FAIL_COND_MSG(frames.is_null(), vformat("There is no animation with name '%s'.", name));
ERR_FAIL_COND_MSG(!frames->get_animation_names().has(name), vformat("There is no animation with name '%s'.", name));
if (frames->get_frame_count(name) == 0) {
return;
}
playing = true;
custom_speed_scale = p_custom_scale;
if (name != animation) {
animation = name;
int end_frame = MAX(0, frames->get_frame_count(animation) - 1);
if (p_from_end) {
set_frame_and_progress(end_frame, 1.0);
} else {
set_frame_and_progress(0, 0.0);
}
emit_signal(SceneStringName(animation_changed));
} else {
int end_frame = MAX(0, frames->get_frame_count(animation) - 1);
bool is_backward = std::signbit(speed_scale * custom_speed_scale);
if (p_from_end && is_backward && frame == 0 && frame_progress <= 0.0) {
set_frame_and_progress(end_frame, 1.0);
} else if (!p_from_end && !is_backward && frame == end_frame && frame_progress >= 1.0) {
set_frame_and_progress(0, 0.0);
}
}
set_process_internal(true);
notify_property_list_changed();
queue_redraw();
}
void AnimatedSprite2D::play_backwards(const StringName &p_name) {
play(p_name, -1, true);
}
void AnimatedSprite2D::_stop_internal(bool p_reset) {
playing = false;
if (p_reset) {
custom_speed_scale = 1.0;
set_frame_and_progress(0, 0.0);
}
notify_property_list_changed();
set_process_internal(false);
}
void AnimatedSprite2D::pause() {
_stop_internal(false);
}
void AnimatedSprite2D::stop() {
_stop_internal(
|
ast_based
|
<|fim_prefix|>d::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs) const {
return Run(RunOptions(), inputs, fetch_outputs, run_outputs, outputs,
nullptr);
}
absl::Status ClientSession::Impl::MaybeExtendGraph() const {
mutex_lock l(mu_);
int num_nodes = graph_->num_node_ids();
if (num_nodes > last_num_graph_nodes_) {
GraphDef graph_def;
graph_->ToGraphDefSubRange(&graph_def, last_num_graph_nodes_);
last_num_graph_nodes_ = num_nodes;
return session_->Extend(graph_def);
}
return absl::OkStatus();
}
absl::Status ClientSession::Run(const RunOptions& run_options,
const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) const {
std::vector<std::pair<string, Tensor>> feeds;
feeds.reserve(inputs.size());
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(std::piecewise_construct,
std::forward_as_tuple(feed.first.name()),
std::forward_as_tuple(feed.second.tensor));
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata);
}
absl::Status ClientSession::Run(
const RunOptions& run_options, const FeedType& inputs,
<|fim_suffix|>,
const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs,
RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) const {
std::vector<std::pair<string, Tensor>> feeds;
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(feed.first.name(), feed.second.tensor);
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata,
threadpool_options);
}
absl::Status ClientSession::MakeCallable(
const CallableOptions& callable_options, CallableHandle* out_handle) {
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->MakeCallable(callable_options, out_handle);
}
absl::Status ClientSession::RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata) {
return impl()->session_->RunCallable(handle, feed_tensors, fetch_tensors,
run_metadata);
}
absl::Status ClientSession::RunCallable(
CallableHandle handle, const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& options) {
return impl()->session_->RunCallable(handle, feed_tensors, fetch_tensors,
<|fim_middle|>const std::vector<Output>& fetch_outputs
|
d::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs) const {
return Run(RunOptions(), inputs, fetch_outputs, run_outputs, outputs,
nullptr);
}
absl::Status ClientSession::Impl::MaybeExtendGraph() const {
mutex_lock l(mu_);
int num_nodes = graph_->num_node_ids();
if (num_nodes > last_num_graph_nodes_) {
GraphDef graph_def;
graph_->ToGraphDefSubRange(&graph_def, last_num_graph_nodes_);
last_num_graph_nodes_ = num_nodes;
return session_->Extend(graph_def);
}
return absl::OkStatus();
}
absl::Status ClientSession::Run(const RunOptions& run_options,
const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) const {
std::vector<std::pair<string, Tensor>> feeds;
feeds.reserve(inputs.size());
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(std::piecewise_construct,
std::forward_as_tuple(feed.first.name()),
std::forward_as_tuple(feed.second.tensor));
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata);
}
absl::Status ClientSession::Run(
const RunOptions& run_options, const FeedType& inputs,
|
const std::vector<Output>& fetch_outputs
|
,
const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs,
RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) const {
std::vector<std::pair<string, Tensor>> feeds;
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(feed.first.name(), feed.second.tensor);
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata,
threadpool_options);
}
absl::Status ClientSession::MakeCallable(
const CallableOptions& callable_options, CallableHandle* out_handle) {
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->MakeCallable(callable_options, out_handle);
}
absl::Status ClientSession::RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata) {
return impl()->session_->RunCallable(handle, feed_tensors, fetch_tensors,
run_metadata);
}
absl::Status ClientSession::RunCallable(
CallableHandle handle, const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& options) {
return impl()->session_->RunCallable(handle, feed_tensors, fetch_tensors,
|
ast_based
|
<|fim_prefix|>/**********************************************************************
* File: baseapi.cpp
* Description: Simple API for calling tesseract.
* Author: Ray Smith
*
* (C) Copyright 2006, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#define _USE_MATH_DEFINES // for M_PI
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "boxword.h" // for BoxWord
#include "coutln.h" // for C_OUTLINE_IT, C_OUTLINE_LIST
#include "dawg_cache.h" // for DawgCache
#include "dict.h" // for Dict
#include "elst.h" // for ELIST_ITERATOR, ELISTIZE, ELISTIZEH
#include "environ.h" // for l_uint8
#ifndef DISABLED_LEGACY_ENGINE
#include "equationdetect.h" // for EquationDetect, destructor of equ_detect_<|fim_suffix|>#include "helpers.h" // for IntCastRounded, chomp_string, copy_string
#include "host.h" // for MAX_PATH
#include "imageio.h" // for IFF_TIFF_G4, IFF_TIFF, IFF_TIFF_G3, ...
#ifndef DISABLED_LEGACY_ENGINE
# include "intfx.h" // for INT_FX_RESULT_STRUCT
#endif
#include "mutableiterator.h" // for MutableIterator
#include "normalis.h" // for kBlnBaselineOffset, kBlnXHeight
#include "pageres.h" // for PAGE_RES_IT, WERD_RES, PAGE_RES, CR_DE...
#include "paragraphs.h" // for DetectParagraphs
#include "params.h" // for BoolParam, IntParam, DoubleParam, Stri...
#include "pdblock.h" // for PDBLK
#include "points.h" // for FCOORD
#include "polyblk.h" // for POLY_BLOCK
#include "rect.h" // for TBOX
#include "stepblob.h" // for C_BLOB_IT, C_BLOB, C_BLOB_LIST
#include "tessdatamanager.h" // for TessdataManager, kTrainedDataSuffix
#include "tesseractclass.h" // for Tesseract
#include "tprintf.h" // for tprintf
#include "werd.h" // for WERD, WERD_IT, W_FUZZY_NON, W_FUZZY_SP
#include "thresholder.h" // for ImageThresholder
#include <tesseract/baseapi.h>
#include <tesseract/ocrclass.h> // for ETEXT_DESC
#include <tesseract/osdetect.h> // for OSResults, OSBestResult, OrientationId...
#include <tesseract/renderer.h> // for TessResultRenderer
#include <tesseract/resultiterator.h> // for ResultIterator
#include <cmath> // for round, M_PI
#include <cstdint> // for int32_t
#include <cstring> // for strcmp, strcpy
#include <filesystem> // for std::filesystem
#include <fstream> // for size_t
#include <iostream> // for std::cin
#include <locale> // for std::locale::classic
#include <memory> // for std::unique_ptr
#include <set> // for std::pair
#include <sstream> // for std::stringstream
#include <vector> // for std::vector
#include <allheaders.h> // for pixDestroy, boxCreate, boxaAddBox, box...
#ifdef HAVE_LIBCURL
# include <curl/curl.h>
#endif
<|fim_middle|>#endif // ndef DISABLED_LEGACY_ENGINE
#include "errcode.h" // for ASSERT_HOST
|
/**********************************************************************
* File: baseapi.cpp
* Description: Simple API for calling tesseract.
* Author: Ray Smith
*
* (C) Copyright 2006, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#define _USE_MATH_DEFINES // for M_PI
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "boxword.h" // for BoxWord
#include "coutln.h" // for C_OUTLINE_IT, C_OUTLINE_LIST
#include "dawg_cache.h" // for DawgCache
#include "dict.h" // for Dict
#include "elst.h" // for ELIST_ITERATOR, ELISTIZE, ELISTIZEH
#include "environ.h" // for l_uint8
#ifndef DISABLED_LEGACY_ENGINE
#include "equationdetect.h" // for EquationDetect, destructor of equ_detect_
|
#endif // ndef DISABLED_LEGACY_ENGINE
#include "errcode.h" // for ASSERT_HOST
|
#include "helpers.h" // for IntCastRounded, chomp_string, copy_string
#include "host.h" // for MAX_PATH
#include "imageio.h" // for IFF_TIFF_G4, IFF_TIFF, IFF_TIFF_G3, ...
#ifndef DISABLED_LEGACY_ENGINE
# include "intfx.h" // for INT_FX_RESULT_STRUCT
#endif
#include "mutableiterator.h" // for MutableIterator
#include "normalis.h" // for kBlnBaselineOffset, kBlnXHeight
#include "pageres.h" // for PAGE_RES_IT, WERD_RES, PAGE_RES, CR_DE...
#include "paragraphs.h" // for DetectParagraphs
#include "params.h" // for BoolParam, IntParam, DoubleParam, Stri...
#include "pdblock.h" // for PDBLK
#include "points.h" // for FCOORD
#include "polyblk.h" // for POLY_BLOCK
#include "rect.h" // for TBOX
#include "stepblob.h" // for C_BLOB_IT, C_BLOB, C_BLOB_LIST
#include "tessdatamanager.h" // for TessdataManager, kTrainedDataSuffix
#include "tesseractclass.h" // for Tesseract
#include "tprintf.h" // for tprintf
#include "werd.h" // for WERD, WERD_IT, W_FUZZY_NON, W_FUZZY_SP
#include "thresholder.h" // for ImageThresholder
#include <tesseract/baseapi.h>
#include <tesseract/ocrclass.h> // for ETEXT_DESC
#include <tesseract/osdetect.h> // for OSResults, OSBestResult, OrientationId...
#include <tesseract/renderer.h> // for TessResultRenderer
#include <tesseract/resultiterator.h> // for ResultIterator
#include <cmath> // for round, M_PI
#include <cstdint> // for int32_t
#include <cstring> // for strcmp, strcpy
#include <filesystem> // for std::filesystem
#include <fstream> // for size_t
#include <iostream> // for std::cin
#include <locale> // for std::locale::classic
#include <memory> // for std::unique_ptr
#include <set> // for std::pair
#include <sstream> // for std::stringstream
#include <vector> // for std::vector
#include <allheaders.h> // for pixDestroy, boxCreate, boxaAddBox, box...
#ifdef HAVE_LIBCURL
# include <curl/curl.h>
#endif
|
random
|
<|fim_prefix|>dif // ndef DISABLED_LEGACY_ENGINE
} else {
// Now run the main recognition.
bool wait_for_text = true;
GetBoolVariable("paragraph_text_based", &wait_for_text);
if (!wait_for_text) {
DetectParagraphs(false);
}
if (tesseract_->recog_all_words(page_res_, monitor, nullptr, nullptr, 0)) {
if (wait_for_text) {
DetectParagraphs(true);
}
} else {
result = -1;
}
}
return result;
}
// Takes ownership of the input pix.
void TessBaseAPI::SetInputImage(Pix *pix) {
tesseract_->set_pix_original(pix);
}
Pix *TessBaseAPI::GetInputImage() {
return tesseract_->pix_original();
}
const char *TessBaseAPI::GetInputName() {
if (!input_file_.empty()) {
return input_file_.c_str();
}
return nullptr;
}
const char *TessBaseAPI::GetDatapath() {
return tesseract_->datadir.c_str();
}
int TessBaseAPI::GetSourceYResolution() {
if (thresholder_ == nullptr)
return -1;
return thresholder_->GetSourceYResolution();
}
// If flist exists, get data from there. Otherwise get data from buf.
// Seems convoluted, but is the easiest way I know of to meet multiple
// goals. Support streaming from stdin, and also work on platforms
// lacking fmemopen.
// TODO: check different logic for flist/buf and simplify.
bool TessBaseAPI::ProcessPagesFileList(FILE *flist, std::string *buf, const char *retry_config,
int timeout_millisec, TessResultRenderer *renderer,
int tessedit_page_number) {
if (!flist && !buf) {
return false;
}
unsigned page = (tessedit_page_number >= 0) ? tessedit_page_number : 0;
char pagename[MAX_PATH];
std::vector<std::string> lines;
if (!flist) {
std::string line;
for (const auto ch : *buf) {
if (ch == '\n') {
lines.push_back(line);
line.clear();
} else {
line.push_back(ch);
}
}
if (!line.empty()) {
// Add last line without terminating LF.
<|fim_suffix|>
}
if (lines.empty()) {
return false;
}
}
// Skip to the requested page number.
for (unsigned i = 0; i < page; i++) {
if (flist) {
if (fgets(pagename, sizeof(pagename), flist) == nullptr) {
break;
}
}
}
// Begin producing output
if (renderer && !renderer->BeginDocument(document_title.c_str())) {
return false;
}
// Loop over all pages - or just the requested one
while (true) {
if (flist) {
if (fgets(pagename, sizeof(pagename), flist) == nullptr) {
break;
}
} else {
if (page >= lines.size()) {
break;
}
snprintf(pagename, sizeof(pagename), "%s", lines[page].c_str());
}
chomp_string(pagename);
Pix *pix = pixRead(pagename);
if (pix == nullptr) {
tprintf("Image file %s cannot be read!\n", pagename);
return false;
}
tprintf("Page %u : %s\n", page, pagename);
bool r = ProcessPage(pix, page, pagename, retry_config, timeout_millisec, renderer);
pixDestroy(&pix);
if (!r) {
return false;
}
if (tessedit_page_number >= 0) {
break;
}
++page;
}
// Finish producing output
if (renderer && !renderer->EndDocument()) {
return false;
}
return true;
}
bool TessBaseAPI::ProcessPagesMultipageTiff(const l_uint8 *data, size_t size, const char *filename,
const char *retry_config, int timeout_millisec,
TessResultRenderer *renderer,
int tessedit_page_number) {
Pix *pix = nullptr;
int page = (tessedit_page_number >= 0) ? tessedit_page_number : 0;
size_t offset = 0;
for (;; ++page) {
if (tessedit_page_number >= 0) {
page = tessedit_page_number;
pix = (data) ? pixReadMemTiff(data, size, page) : pixReadTiff(filename, page);
} else {
pix = (data) ? pixReadMemFromMultipageTiff(data, size, &offset)
: pixRead<|fim_middle|>lines.push_back(line);
|
dif // ndef DISABLED_LEGACY_ENGINE
} else {
// Now run the main recognition.
bool wait_for_text = true;
GetBoolVariable("paragraph_text_based", &wait_for_text);
if (!wait_for_text) {
DetectParagraphs(false);
}
if (tesseract_->recog_all_words(page_res_, monitor, nullptr, nullptr, 0)) {
if (wait_for_text) {
DetectParagraphs(true);
}
} else {
result = -1;
}
}
return result;
}
// Takes ownership of the input pix.
void TessBaseAPI::SetInputImage(Pix *pix) {
tesseract_->set_pix_original(pix);
}
Pix *TessBaseAPI::GetInputImage() {
return tesseract_->pix_original();
}
const char *TessBaseAPI::GetInputName() {
if (!input_file_.empty()) {
return input_file_.c_str();
}
return nullptr;
}
const char *TessBaseAPI::GetDatapath() {
return tesseract_->datadir.c_str();
}
int TessBaseAPI::GetSourceYResolution() {
if (thresholder_ == nullptr)
return -1;
return thresholder_->GetSourceYResolution();
}
// If flist exists, get data from there. Otherwise get data from buf.
// Seems convoluted, but is the easiest way I know of to meet multiple
// goals. Support streaming from stdin, and also work on platforms
// lacking fmemopen.
// TODO: check different logic for flist/buf and simplify.
bool TessBaseAPI::ProcessPagesFileList(FILE *flist, std::string *buf, const char *retry_config,
int timeout_millisec, TessResultRenderer *renderer,
int tessedit_page_number) {
if (!flist && !buf) {
return false;
}
unsigned page = (tessedit_page_number >= 0) ? tessedit_page_number : 0;
char pagename[MAX_PATH];
std::vector<std::string> lines;
if (!flist) {
std::string line;
for (const auto ch : *buf) {
if (ch == '\n') {
lines.push_back(line);
line.clear();
} else {
line.push_back(ch);
}
}
if (!line.empty()) {
// Add last line without terminating LF.
|
lines.push_back(line);
|
}
if (lines.empty()) {
return false;
}
}
// Skip to the requested page number.
for (unsigned i = 0; i < page; i++) {
if (flist) {
if (fgets(pagename, sizeof(pagename), flist) == nullptr) {
break;
}
}
}
// Begin producing output
if (renderer && !renderer->BeginDocument(document_title.c_str())) {
return false;
}
// Loop over all pages - or just the requested one
while (true) {
if (flist) {
if (fgets(pagename, sizeof(pagename), flist) == nullptr) {
break;
}
} else {
if (page >= lines.size()) {
break;
}
snprintf(pagename, sizeof(pagename), "%s", lines[page].c_str());
}
chomp_string(pagename);
Pix *pix = pixRead(pagename);
if (pix == nullptr) {
tprintf("Image file %s cannot be read!\n", pagename);
return false;
}
tprintf("Page %u : %s\n", page, pagename);
bool r = ProcessPage(pix, page, pagename, retry_config, timeout_millisec, renderer);
pixDestroy(&pix);
if (!r) {
return false;
}
if (tessedit_page_number >= 0) {
break;
}
++page;
}
// Finish producing output
if (renderer && !renderer->EndDocument()) {
return false;
}
return true;
}
bool TessBaseAPI::ProcessPagesMultipageTiff(const l_uint8 *data, size_t size, const char *filename,
const char *retry_config, int timeout_millisec,
TessResultRenderer *renderer,
int tessedit_page_number) {
Pix *pix = nullptr;
int page = (tessedit_page_number >= 0) ? tessedit_page_number : 0;
size_t offset = 0;
for (;; ++page) {
if (tessedit_page_number >= 0) {
page = tessedit_page_number;
pix = (data) ? pixReadMemTiff(data, size, page) : pixReadTiff(filename, page);
} else {
pix = (data) ? pixReadMemFromMultipageTiff(data, size, &offset)
: pixRead
|
ast_based
|
<|fim_prefix|> diff_tmp.resize(curr_size + ggml_nbytes(t));
memcpy(diff_tmp.data() + curr_size, t->data, ggml_nbytes(t));
}
}
// build the v_diff tensors from v_diff_tmp (v_diff need to be transposed)
// TODO @ngxson : maybe add option NOT to transpose v_diff; will be useful for "mean" method
void build_v_diff(bool transpose) {
printf("build_v_diff\n");
for (int il = 0; il < n_layers - 1; il++) {
auto & diff_tmp = v_diff_tmp[il];
int n_elem = diff_tmp.size() / sizeof(float);
GGML_ASSERT(n_elem % n_embd == 0);
int n_rows = n_elem / n_embd;
struct ggml_tensor * diff = transpose
? ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd)
: ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_embd, n_rows);
ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
diff->data = malloc(ggml_nbytes(diff)); // TODO: get rid of this malloc if possible
if (transpose) {
// copy data & transpose
float * arr = (float *) diff_tmp.data();
for (int ir = 0; ir < n_rows; ++ir) {
for (int ic = 0; ic < n_embd; ++ic) {
float f = arr[ir*n_embd + ic];
ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
}
}
} else {
// only copy
memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
}
v_diff.push_back(diff);
print_debug_tensor(diff);
// free memory of diff_tmp
diff_tmp.resize(0);
}
}
~train_context() {
for (auto ptr : v_final) free(ptr->data);
for (auto ptr : v_diff) free(ptr->data);
// no need to free v_diff_tmp, since we didn't use malloc
ggml_free(ctx_ggml);
}
};
struct tokenized_prompt {<|fim_suffix|> const llama_model * model = llama_get_model(ctx);
const llama_vocab * vocab = llama_model_get_vocab(model);
const bool add_bos = llama_vocab_get_add_bos(vocab);
tokens_pos = common_tokenize(ctx, pos, add_bos, true);
tokens_neg = common_tokenize(ctx, neg, add_bos, true);
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
padding_seq(ctx, tokens_pos, max_seq_len);
padding_seq(ctx, tokens_neg, max_seq_len);
}
void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
// TODO: customize padding token
std::vector<llama_token> pad_tokens = common_tokenize(ctx, " ", false);
llama_token pad_tok = pad_tokens.back();
while (tokens.size() < len) {
tokens.push_back(pad_tok);
}
}
};
//////////////////////////////////////////////////
template <typename T>
static std::string to_string(const T & val) {
std::stringstream ss;
ss << val;
return ss.str();
}
static std::vector<std::string> ctrlvec_load_prompt_file(std::string path, bool skip_empty_lines) {
std::vector<std::string> output;
std::ifstream file(path);
if (!file.is_open()) {
fprintf(stderr, "error: unable to open file: %s\n", path.c_str());
exit(1);
}
std::string line;
while (std::getline(file, line)) {
bool is_skip = skip_empty_lines && line.empty();
if (!is_skip) {
string_process_escapes(line);
output.push_back(line);
}
}
file.close();
return output;
}
//////////////////////////////////////////////////
static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
auto * cb_data = (callback_data *) user_data;
static const char * l_out_name = "l_out";
const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;
if (ask) {
return is_l_out;
}
<|fim_middle|> std::vector<llama_token> tokens_pos;
std::vector<llama_token> tokens_neg;
size_t max_seq_len;
tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
|
diff_tmp.resize(curr_size + ggml_nbytes(t));
memcpy(diff_tmp.data() + curr_size, t->data, ggml_nbytes(t));
}
}
// build the v_diff tensors from v_diff_tmp (v_diff need to be transposed)
// TODO @ngxson : maybe add option NOT to transpose v_diff; will be useful for "mean" method
void build_v_diff(bool transpose) {
printf("build_v_diff\n");
for (int il = 0; il < n_layers - 1; il++) {
auto & diff_tmp = v_diff_tmp[il];
int n_elem = diff_tmp.size() / sizeof(float);
GGML_ASSERT(n_elem % n_embd == 0);
int n_rows = n_elem / n_embd;
struct ggml_tensor * diff = transpose
? ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd)
: ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_embd, n_rows);
ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
diff->data = malloc(ggml_nbytes(diff)); // TODO: get rid of this malloc if possible
if (transpose) {
// copy data & transpose
float * arr = (float *) diff_tmp.data();
for (int ir = 0; ir < n_rows; ++ir) {
for (int ic = 0; ic < n_embd; ++ic) {
float f = arr[ir*n_embd + ic];
ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
}
}
} else {
// only copy
memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
}
v_diff.push_back(diff);
print_debug_tensor(diff);
// free memory of diff_tmp
diff_tmp.resize(0);
}
}
~train_context() {
for (auto ptr : v_final) free(ptr->data);
for (auto ptr : v_diff) free(ptr->data);
// no need to free v_diff_tmp, since we didn't use malloc
ggml_free(ctx_ggml);
}
};
struct tokenized_prompt {
|
std::vector<llama_token> tokens_pos;
std::vector<llama_token> tokens_neg;
size_t max_seq_len;
tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
|
const llama_model * model = llama_get_model(ctx);
const llama_vocab * vocab = llama_model_get_vocab(model);
const bool add_bos = llama_vocab_get_add_bos(vocab);
tokens_pos = common_tokenize(ctx, pos, add_bos, true);
tokens_neg = common_tokenize(ctx, neg, add_bos, true);
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
padding_seq(ctx, tokens_pos, max_seq_len);
padding_seq(ctx, tokens_neg, max_seq_len);
}
void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
// TODO: customize padding token
std::vector<llama_token> pad_tokens = common_tokenize(ctx, " ", false);
llama_token pad_tok = pad_tokens.back();
while (tokens.size() < len) {
tokens.push_back(pad_tok);
}
}
};
//////////////////////////////////////////////////
template <typename T>
static std::string to_string(const T & val) {
std::stringstream ss;
ss << val;
return ss.str();
}
static std::vector<std::string> ctrlvec_load_prompt_file(std::string path, bool skip_empty_lines) {
std::vector<std::string> output;
std::ifstream file(path);
if (!file.is_open()) {
fprintf(stderr, "error: unable to open file: %s\n", path.c_str());
exit(1);
}
std::string line;
while (std::getline(file, line)) {
bool is_skip = skip_empty_lines && line.empty();
if (!is_skip) {
string_process_escapes(line);
output.push_back(line);
}
}
file.close();
return output;
}
//////////////////////////////////////////////////
static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
auto * cb_data = (callback_data *) user_data;
static const char * l_out_name = "l_out";
const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;
if (ask) {
return is_l_out;
}
|
random
|
<|fim_prefix|> * indirect, incidental, special, exemplary, or consequential damages
* (including, but not limited to, procurement of substitute goods or services;
* loss of use, data, or profits; or business interruption) however caused
* and on any theory of liability, whether in contract, strict liability,
* or tort (including negligence or otherwise) arising in any way out of
* the use of this software, even if advised of the possibility of such damage.
*/
#ifndef _tegra_hal_H_INCLUDED_
#define _tegra_hal_H_INCLUDED_
#define CAROTENE_NS carotene_o4t
#include "carotene/functions.hpp"
#include <cstddef>
#include <cstring>
#include <vector>
#include <opencv2/core/base.hpp>
#define RANGE_DATA(type, base, step) reinterpret_cast<type*>(const_cast<char *>(reinterpret_cast<const char *>(base)) + static_cast<size_t>(range.start) * step)
#define PARALLEL_CORE 0
#if PARALLEL_CORE
#define SRC_ARG1 ST * src1_data_, size_t src1_step_,
#define SRC_STORE1 src1_data(src1_data_), src1_step(src1_step_),
#define SRC_VAR1 ST * src1_data; \
size_t src1_step;
#define SRC_ARG2 ST * src1_data_, size_t src1_step_, \
ST * src2_data_, size_t src2_step_,
#define SRC_STORE2 src1_data(src1_data_), src1_step(src1_step_), \
src2_data(src2_data_), src2_step(src2_step_),
#define SRC_VAR2 ST * src1_data; \
size_t src1_step; \
ST * src2_data; \
size_t src2_step;
#define DST_ARG1 DT * dst1_data_, size_t dst1_step_,
#define DST_STORE1 dst1_data(dst1_data_), dst1_step(dst1_step_),
#define DST_VAR1 DT * dst1_data; \
size_t dst1_step;
#define SCALE_ARG0
#define SCALE_STORE0
#define SCALE_VAR0
#define SCALE_ARG1 , double scale_
#define SCALE_STORE1 , scale(scale_)
#define SCALE_VAR1 double scale;
#define SCALE_ARG3 , const double *scales_
#define SCALE_STORE3 , scales(scales_, scales_ + 3)
#define SCALE_VAR3 std::vector<double> scales;<|fim_suffix|> int width_, int height_ \
SCALE_ARG##scale_cnt) : \
cv::ParallelLoopBody(), SRC_STORE##src_cnt \
DST_STORE##dst_cnt \
width(width_), height(height_) \
SCALE_STORE##scale_cnt {} \
virtual void operator()(const cv::Range& range) const \
{ \
CAROTENE_NS::func(CAROTENE_NS::Size2D(width, range.end-range.start), __VA_ARGS__); \
} \
private: \
SRC_VAR##src_cnt \
DST_VAR##dst_cnt \
int width, height; \
SCALE_VAR##scale_cnt \
const TegraGenOp_##name##_Invoker& operator= (const TegraGenOp_##name##_Invoker&); \
};
#define TegraBinaryOp_Invoker(name, func) TegraGenOp_Invoker(name, func, 2, 1, 0, \
RANGE_DATA(ST, src1_data, src1_step), src1_step, \
RANGE_DATA(ST, src2_data, src2_step), src2_step, \
RANGE_DATA(DT, dst1_data, dst1_step), dst1_step )
#define TegraBinaryOp_InvokerVAArg(name, func, ...) TegraGenOp_Invoker(name, func, 2, 1, 0, \
RANGE_DATA(ST, src1_data, src1_step), src1_step, \
RANGE_DATA(ST, src2_data, src2_step), src2_step, \
RANGE_DATA(DT, dst1_data, dst1_step), dst1_step, __VA_ARGS__)
#define TEGRA_BINARYOP(type, op, src1, sz1, src2, sz2, dst, sz, w, h) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
parallel_for_(cv::Range(0, h), \
TegraGenOp_##op##_Invoker<const type, type>(src1, sz1, src2, sz2, dst, sz, w, h), \
(w * h) / static_cast<double>(1<<16)), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
<|fim_middle|>
#define TegraGenOp_Invoker(name, func, src_cnt, dst_cnt, scale_cnt, ...) \
template <typename ST, typename DT> \
class TegraGenOp_##name##_Invoker : public cv::ParallelLoopBody \
{ \
public: \
TegraGenOp_##name##_Invoker(SRC_ARG##src_cnt \
DST_ARG##dst_cnt \
|
* indirect, incidental, special, exemplary, or consequential damages
* (including, but not limited to, procurement of substitute goods or services;
* loss of use, data, or profits; or business interruption) however caused
* and on any theory of liability, whether in contract, strict liability,
* or tort (including negligence or otherwise) arising in any way out of
* the use of this software, even if advised of the possibility of such damage.
*/
#ifndef _tegra_hal_H_INCLUDED_
#define _tegra_hal_H_INCLUDED_
#define CAROTENE_NS carotene_o4t
#include "carotene/functions.hpp"
#include <cstddef>
#include <cstring>
#include <vector>
#include <opencv2/core/base.hpp>
#define RANGE_DATA(type, base, step) reinterpret_cast<type*>(const_cast<char *>(reinterpret_cast<const char *>(base)) + static_cast<size_t>(range.start) * step)
#define PARALLEL_CORE 0
#if PARALLEL_CORE
#define SRC_ARG1 ST * src1_data_, size_t src1_step_,
#define SRC_STORE1 src1_data(src1_data_), src1_step(src1_step_),
#define SRC_VAR1 ST * src1_data; \
size_t src1_step;
#define SRC_ARG2 ST * src1_data_, size_t src1_step_, \
ST * src2_data_, size_t src2_step_,
#define SRC_STORE2 src1_data(src1_data_), src1_step(src1_step_), \
src2_data(src2_data_), src2_step(src2_step_),
#define SRC_VAR2 ST * src1_data; \
size_t src1_step; \
ST * src2_data; \
size_t src2_step;
#define DST_ARG1 DT * dst1_data_, size_t dst1_step_,
#define DST_STORE1 dst1_data(dst1_data_), dst1_step(dst1_step_),
#define DST_VAR1 DT * dst1_data; \
size_t dst1_step;
#define SCALE_ARG0
#define SCALE_STORE0
#define SCALE_VAR0
#define SCALE_ARG1 , double scale_
#define SCALE_STORE1 , scale(scale_)
#define SCALE_VAR1 double scale;
#define SCALE_ARG3 , const double *scales_
#define SCALE_STORE3 , scales(scales_, scales_ + 3)
#define SCALE_VAR3 std::vector<double> scales;
|
#define TegraGenOp_Invoker(name, func, src_cnt, dst_cnt, scale_cnt, ...) \
template <typename ST, typename DT> \
class TegraGenOp_##name##_Invoker : public cv::ParallelLoopBody \
{ \
public: \
TegraGenOp_##name##_Invoker(SRC_ARG##src_cnt \
DST_ARG##dst_cnt \
|
int width_, int height_ \
SCALE_ARG##scale_cnt) : \
cv::ParallelLoopBody(), SRC_STORE##src_cnt \
DST_STORE##dst_cnt \
width(width_), height(height_) \
SCALE_STORE##scale_cnt {} \
virtual void operator()(const cv::Range& range) const \
{ \
CAROTENE_NS::func(CAROTENE_NS::Size2D(width, range.end-range.start), __VA_ARGS__); \
} \
private: \
SRC_VAR##src_cnt \
DST_VAR##dst_cnt \
int width, height; \
SCALE_VAR##scale_cnt \
const TegraGenOp_##name##_Invoker& operator= (const TegraGenOp_##name##_Invoker&); \
};
#define TegraBinaryOp_Invoker(name, func) TegraGenOp_Invoker(name, func, 2, 1, 0, \
RANGE_DATA(ST, src1_data, src1_step), src1_step, \
RANGE_DATA(ST, src2_data, src2_step), src2_step, \
RANGE_DATA(DT, dst1_data, dst1_step), dst1_step )
#define TegraBinaryOp_InvokerVAArg(name, func, ...) TegraGenOp_Invoker(name, func, 2, 1, 0, \
RANGE_DATA(ST, src1_data, src1_step), src1_step, \
RANGE_DATA(ST, src2_data, src2_step), src2_step, \
RANGE_DATA(DT, dst1_data, dst1_step), dst1_step, __VA_ARGS__)
#define TEGRA_BINARYOP(type, op, src1, sz1, src2, sz2, dst, sz, w, h) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
parallel_for_(cv::Range(0, h), \
TegraGenOp_##op##_Invoker<const type, type>(src1, sz1, src2, sz2, dst, sz, w, h), \
(w * h) / static_cast<double>(1<<16)), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
|
random
|
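The TegraGenOp_Invoker macros above stamp out cv::ParallelLoopBody subclasses so that carotene kernels can be dispatched over row ranges with parallel_for_. Below is a minimal hand-written sketch of the same pattern, assuming a plain 8-bit buffer and a made-up per-row operation (AddOneInvoker and the "+1" kernel are illustrative only, not part of carotene or the HAL):

#include <opencv2/core/utility.hpp>

// Row-parallel invoker following the same shape the TegraGenOp_Invoker macro
// expands to: each call handles the rows in [range.start, range.end).
class AddOneInvoker : public cv::ParallelLoopBody {
public:
    AddOneInvoker(const unsigned char *src, size_t src_step,
                  unsigned char *dst, size_t dst_step, int width)
        : src_(src), src_step_(src_step), dst_(dst), dst_step_(dst_step), width_(width) {}

    void operator()(const cv::Range &range) const override {
        for (int y = range.start; y < range.end; ++y) {
            const unsigned char *s = src_ + static_cast<size_t>(y) * src_step_;
            unsigned char *d = dst_ + static_cast<size_t>(y) * dst_step_;
            for (int x = 0; x < width_; ++x) {
                d[x] = static_cast<unsigned char>(s[x] + 1);
            }
        }
    }

private:
    const unsigned char *src_;
    size_t src_step_;
    unsigned char *dst_;
    size_t dst_step_;
    int width_;
};

// Usage, mirroring TEGRA_BINARYOP's row-range dispatch:
// cv::parallel_for_(cv::Range(0, height), AddOneInvoker(src, src_step, dst, dst_step, width));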
<|fim_prefix|>nst RID &p_id, double p_min, double p_max) override;
void accessibility_update_set_num_step(const RID &p_id, double p_step) override;
void accessibility_update_set_num_jump(const RID &p_id, double p_jump) override;
void accessibility_update_set_scroll_x(const RID &p_id, double p_position) override;
void accessibility_update_set_scroll_x_range(const RID &p_id, double p_min, double p_max) override;
void accessibility_update_set_scroll_y(const RID &p_id, double p_position) override;
void accessibility_update_set_scroll_y_range(const RID &p_id, double p_min, double p_max) override;
void accessibility_update_set_text_decorations(const RID &p_id, bool p_underline, bool p_strikethrough, bool p_overline) override;
void accessibility_update_set_text_align(const RID &p_id, HorizontalAlignment p_align) override;
void accessibility_update_set_text_selection(const RID &p_id, const RID &p_text_start_id, int p_start_char, const RID &p_text_end_id, int p_end_char) override;
void accessibility_update_set_flag(const RID &p_id, DisplayServer::AccessibilityFlags p_flag, bool p_value) override;
void accessibility_update_set_classname(const RID &p_id, const String &p_classname) override;
void accessibility_update_set_placeholder(const RID &p_id, const String &p_placeholder) override;
void accessibility_update_set_language(const RID &p_id, const String &p_language) override;
void accessibility_update_set_text_orientation(const RID &p_id, bool p_vertical) override;
void accessibility_update_set_list_orientation(const RID &p_id, bool p_vertical) override;
void accessibility_update_set_shortcut(const RID &p_id, const String &p_shortcut) override;
void accessibility_update_set_url(const RID &p_id, const String &p_url) override;
void accessibility_update_set_role_description(const RID &p_id, const String &p_description) override;
void accessibility_update_set_state_description(const RID &p_id, const String &p_description) override;
void accessibility_update_set_color_value(<|fim_suffix|>, const Color &p_color) override;
void accessibility_update_set_background_color(const RID &p_id, const Color &p_color) override;
void accessibility_update_set_foreground_color(const RID &p_id, const Color &p_color) override;
AccessibilityDriverAccessKit();
~AccessibilityDriverAccessKit();
};
#endif // ACCESSKIT_ENABLED
<|fim_middle|>const RID &p_id
|
nst RID &p_id, double p_min, double p_max) override;
void accessibility_update_set_num_step(const RID &p_id, double p_step) override;
void accessibility_update_set_num_jump(const RID &p_id, double p_jump) override;
void accessibility_update_set_scroll_x(const RID &p_id, double p_position) override;
void accessibility_update_set_scroll_x_range(const RID &p_id, double p_min, double p_max) override;
void accessibility_update_set_scroll_y(const RID &p_id, double p_position) override;
void accessibility_update_set_scroll_y_range(const RID &p_id, double p_min, double p_max) override;
void accessibility_update_set_text_decorations(const RID &p_id, bool p_underline, bool p_strikethrough, bool p_overline) override;
void accessibility_update_set_text_align(const RID &p_id, HorizontalAlignment p_align) override;
void accessibility_update_set_text_selection(const RID &p_id, const RID &p_text_start_id, int p_start_char, const RID &p_text_end_id, int p_end_char) override;
void accessibility_update_set_flag(const RID &p_id, DisplayServer::AccessibilityFlags p_flag, bool p_value) override;
void accessibility_update_set_classname(const RID &p_id, const String &p_classname) override;
void accessibility_update_set_placeholder(const RID &p_id, const String &p_placeholder) override;
void accessibility_update_set_language(const RID &p_id, const String &p_language) override;
void accessibility_update_set_text_orientation(const RID &p_id, bool p_vertical) override;
void accessibility_update_set_list_orientation(const RID &p_id, bool p_vertical) override;
void accessibility_update_set_shortcut(const RID &p_id, const String &p_shortcut) override;
void accessibility_update_set_url(const RID &p_id, const String &p_url) override;
void accessibility_update_set_role_description(const RID &p_id, const String &p_description) override;
void accessibility_update_set_state_description(const RID &p_id, const String &p_description) override;
void accessibility_update_set_color_value(
|
const RID &p_id
|
, const Color &p_color) override;
void accessibility_update_set_background_color(const RID &p_id, const Color &p_color) override;
void accessibility_update_set_foreground_color(const RID &p_id, const Color &p_color) override;
AccessibilityDriverAccessKit();
~AccessibilityDriverAccessKit();
};
#endif // ACCESSKIT_ENABLED
|
ast_based
|
<|fim_prefix|> void * prune_layers; // pointer to vector containing layer indices to prune
} llama_model_quantize_params;
typedef struct llama_logit_bias {
llama_token token;
float bias;
} llama_logit_bias;
typedef struct llama_sampler_chain_params {
bool no_perf; // whether to measure performance timings
} llama_sampler_chain_params;
// used in chat template
typedef struct llama_chat_message {
const char * role;
const char * content;
} llama_chat_message;
// lora adapter
struct llama_adapter_lora;
// Helpers for getting default parameters
// TODO: update API to start accepting pointers to params structs (https://github.com/ggml-org/llama.cpp/discussions/9172)
LLAMA_API struct llama_model_params llama_model_default_params(void);
LLAMA_API struct llama_context_params llama_context_default_params(void);
LLAMA_API struct llama_sampler_chain_params llama_sampler_chain_default_params(void);
LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
// Initialize the llama + ggml backend
// If numa is true, use NUMA optimizations
// Call once at the start of the program
LLAMA_API void llama_backend_init(void);
// Call once at the end of the program - currently only used for MPI
LLAMA_API void llama_backend_free(void);
//optional:
LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa);
// Optional: an auto threadpool gets created in ggml if not passed explicitly
LLAMA_API void llama_attach_threadpool(
struct llama_context * ctx,
ggml_threadpool_t threadpool,
ggml_threadpool_t threadpool_batch);
LLAMA_API void llama_detach_threadpool(struct llama_context * ctx);
DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file(<|fim_suffix|> // If the file is split into multiple parts, the file name must follow this pattern: <name>-%05d-of-%05d.gguf
// If the split file name does not follow this pattern, use llama_model_load_from_splits
LLAMA_API struct llama_model * llama_model_load_from_file(
const char * path_model,
struct llama_model_params params);
// Load the model from multiple splits (support custom naming scheme)
// The paths must be in the correct order
LLAMA_API struct llama_model * llama_model_load_from_splits(
const char ** paths,
size_t n_paths,
struct llama_model_params params);
LLAMA_API void llama_model_save_to_file(
const struct llama_model * model,
const char * path_model);
DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
"use llama_model_free instead");
LLAMA_API void llama_model_free(struct llama_model * model);
LLAMA_API struct llama_context * llama_init_from_model(
struct llama_model * model,
struct llama_context_params params);
DEPRECATED(LLAMA_API struct llama_context * llama_new_context_with_model(
struct llama_model * model,
struct llama_context_params params),
"use llama_init_from_model instead");
// Frees all allocated memory
LLAMA_API void llama_free(struct llama_context * ctx);
LLAMA_API int64_t llama_time_us(void);
LLAMA_API size_t llama_max_devices(void);
LLAMA_API size_t llama_max_parallel_sequences(void);
LLAMA_API bool llama_supports_mmap (void);
LLAMA_API bool llama_supports_mlock (void);
LLAMA_API bool llama_supports_gpu_offload(void);
LLAMA_API bool llama_supports_rpc (void);
LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);<|fim_middle|> const char * path_model,
struct llama_model_params params),
"use llama_model_load_from_file instead");
// Load the model from a file
|
void * prune_layers; // pointer to vector containing layer indices to prune
} llama_model_quantize_params;
typedef struct llama_logit_bias {
llama_token token;
float bias;
} llama_logit_bias;
typedef struct llama_sampler_chain_params {
bool no_perf; // whether to measure performance timings
} llama_sampler_chain_params;
// used in chat template
typedef struct llama_chat_message {
const char * role;
const char * content;
} llama_chat_message;
// lora adapter
struct llama_adapter_lora;
// Helpers for getting default parameters
// TODO: update API to start accepting pointers to params structs (https://github.com/ggml-org/llama.cpp/discussions/9172)
LLAMA_API struct llama_model_params llama_model_default_params(void);
LLAMA_API struct llama_context_params llama_context_default_params(void);
LLAMA_API struct llama_sampler_chain_params llama_sampler_chain_default_params(void);
LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
// Initialize the llama + ggml backend
// If numa is true, use NUMA optimizations
// Call once at the start of the program
LLAMA_API void llama_backend_init(void);
// Call once at the end of the program - currently only used for MPI
LLAMA_API void llama_backend_free(void);
//optional:
LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa);
// Optional: an auto threadpool gets created in ggml if not passed explicitly
LLAMA_API void llama_attach_threadpool(
struct llama_context * ctx,
ggml_threadpool_t threadpool,
ggml_threadpool_t threadpool_batch);
LLAMA_API void llama_detach_threadpool(struct llama_context * ctx);
DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file(
|
const char * path_model,
struct llama_model_params params),
"use llama_model_load_from_file instead");
// Load the model from a file
|
// If the file is split into multiple parts, the file name must follow this pattern: <name>-%05d-of-%05d.gguf
// If the split file name does not follow this pattern, use llama_model_load_from_splits
LLAMA_API struct llama_model * llama_model_load_from_file(
const char * path_model,
struct llama_model_params params);
// Load the model from multiple splits (support custom naming scheme)
// The paths must be in the correct order
LLAMA_API struct llama_model * llama_model_load_from_splits(
const char ** paths,
size_t n_paths,
struct llama_model_params params);
LLAMA_API void llama_model_save_to_file(
const struct llama_model * model,
const char * path_model);
DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
"use llama_model_free instead");
LLAMA_API void llama_model_free(struct llama_model * model);
LLAMA_API struct llama_context * llama_init_from_model(
struct llama_model * model,
struct llama_context_params params);
DEPRECATED(LLAMA_API struct llama_context * llama_new_context_with_model(
struct llama_model * model,
struct llama_context_params params),
"use llama_init_from_model instead");
// Frees all allocated memory
LLAMA_API void llama_free(struct llama_context * ctx);
LLAMA_API int64_t llama_time_us(void);
LLAMA_API size_t llama_max_devices(void);
LLAMA_API size_t llama_max_parallel_sequences(void);
LLAMA_API bool llama_supports_mmap (void);
LLAMA_API bool llama_supports_mlock (void);
LLAMA_API bool llama_supports_gpu_offload(void);
LLAMA_API bool llama_supports_rpc (void);
LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);
|
random
|
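The declarations above lay out the load/teardown lifecycle of the C API. A minimal sketch of the non-deprecated call sequence, assuming default parameters and a placeholder model path ("model.gguf" is not taken from the excerpt):

#include <cstdio>
#include "llama.h"

int main() {
    llama_backend_init();  // once at the start of the program

    llama_model_params mparams = llama_model_default_params();
    llama_model *model = llama_model_load_from_file("model.gguf", mparams);  // placeholder path
    if (model == NULL) {
        fprintf(stderr, "failed to load model\n");
        llama_backend_free();
        return 1;
    }

    llama_context_params cparams = llama_context_default_params();
    llama_context *ctx = llama_init_from_model(model, cparams);
    if (ctx != NULL) {
        // ... tokenize, decode and sample here ...
        llama_free(ctx);          // frees all memory owned by the context
    }

    llama_model_free(model);      // replaces the deprecated llama_free_model
    llama_backend_free();         // once at the end of the program
    return 0;
}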
<|fim_prefix|>#pragma once
#include "llama.h"
#include "ggml-cpp.h"
#include <string>
#include <unordered_map>
#include <vector>
// TODO: pimpl
//
// llama_adapter_cvec
//
struct llama_adapter_cvec {
ggml_tensor * tensor_for(int il) const;
ggml_tensor * apply_to(ggml_context * ctx, ggml_tensor * cur, int il) const;
bool apply(
const llama_model & model,
const float * data,
size_t len,
int32_t n_embd,
int32_t il_start,
int32_t il_end);
private:
bool init(const llama_model & model);
int32_t layer_start = -1;
int32_t layer_end = -1;
std::vector<ggml_context_ptr> ctxs;
std::vector<ggml_backend_buffer_ptr> bufs;
std::vector<ggml_tensor *> tensors; // per layer
};
//
// llama_adapter_lora
//
struct llama_adapter_lora_weight {
ggml_tensor * a = nullptr;
ggml_tensor * b = nullptr;
// get actual scale based on rank and alpha<|fim_suffix|> const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale;
return scale;
}
llama_adapter_lora_weight() = default;
llama_adapter_lora_weight(ggml_tensor * a, ggml_tensor * b) : a(a), b(b) {}
};
struct llama_adapter_lora {
// map tensor name to lora_a_b
std::unordered_map<std::string, llama_adapter_lora_weight> ab_map;
std::vector<ggml_context_ptr> ctxs;
std::vector<ggml_backend_buffer_ptr> bufs;
float alpha;
// gguf metadata
std::unordered_map<std::string, std::string> gguf_kv;
// activated lora (aLoRA)
std::vector<llama_token> alora_invocation_tokens;
llama_adapter_lora() = default;
~llama_adapter_lora() = default;
llama_adapter_lora_weight * get_weight(ggml_tensor * w);
};
using llama_adapter_loras = std::unordered_map<llama_adapter_lora *, float>;
<|fim_middle|> float get_scale(float alpha, float adapter_scale) const {
const float rank = (float) b->ne[0];
|
#pragma once
#include "llama.h"
#include "ggml-cpp.h"
#include <string>
#include <unordered_map>
#include <vector>
// TODO: pimpl
//
// llama_adapter_cvec
//
struct llama_adapter_cvec {
ggml_tensor * tensor_for(int il) const;
ggml_tensor * apply_to(ggml_context * ctx, ggml_tensor * cur, int il) const;
bool apply(
const llama_model & model,
const float * data,
size_t len,
int32_t n_embd,
int32_t il_start,
int32_t il_end);
private:
bool init(const llama_model & model);
int32_t layer_start = -1;
int32_t layer_end = -1;
std::vector<ggml_context_ptr> ctxs;
std::vector<ggml_backend_buffer_ptr> bufs;
std::vector<ggml_tensor *> tensors; // per layer
};
//
// llama_adapter_lora
//
struct llama_adapter_lora_weight {
ggml_tensor * a = nullptr;
ggml_tensor * b = nullptr;
// get actual scale based on rank and alpha
|
float get_scale(float alpha, float adapter_scale) const {
const float rank = (float) b->ne[0];
|
const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale;
return scale;
}
llama_adapter_lora_weight() = default;
llama_adapter_lora_weight(ggml_tensor * a, ggml_tensor * b) : a(a), b(b) {}
};
struct llama_adapter_lora {
// map tensor name to lora_a_b
std::unordered_map<std::string, llama_adapter_lora_weight> ab_map;
std::vector<ggml_context_ptr> ctxs;
std::vector<ggml_backend_buffer_ptr> bufs;
float alpha;
// gguf metadata
std::unordered_map<std::string, std::string> gguf_kv;
// activated lora (aLoRA)
std::vector<llama_token> alora_invocation_tokens;
llama_adapter_lora() = default;
~llama_adapter_lora() = default;
llama_adapter_lora_weight * get_weight(ggml_tensor * w);
};
using llama_adapter_loras = std::unordered_map<llama_adapter_lora *, float>;
|
random
|
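get_scale() above derives the effective LoRA scale from the adapter's alpha and the rank read from b->ne[0]. A standalone restatement of that arithmetic with made-up numbers (the values below are examples, not taken from any real adapter):

#include <cassert>

// Same formula as llama_adapter_lora_weight::get_scale():
// scale = adapter_scale * alpha / rank when alpha != 0, otherwise adapter_scale.
static float lora_scale(float alpha, float rank, float adapter_scale) {
    return alpha != 0.0f ? adapter_scale * alpha / rank : adapter_scale;
}

int main() {
    assert(lora_scale(16.0f, 8.0f, 1.0f) == 2.0f);   // alpha 16, rank 8 -> doubles the user scale
    assert(lora_scale(0.0f, 8.0f, 0.5f) == 0.5f);    // alpha 0 falls back to the raw user scale
    return 0;
}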
<|fim_prefix|> a set of parameter name, value pairs.
* Searches the standard places: tessdata/configs, tessdata/tessconfigs
* and also accepts a relative or absolute path name.
*/
void TessBaseAPI::ReadConfigFile(const char *filename) {
tesseract_->read_config_file(filename, SET_PARAM_CONSTRAINT_NON_INIT_ONLY);
}
/** Same as above, but only set debug params from the given config file. */
void TessBaseAPI::ReadDebugConfigFile(const char *filename) {
tesseract_->read_config_file(filename, SET_PARAM_CONSTRAINT_DEBUG_ONLY);
}
/**
* Set the current page segmentation mode. Defaults to PSM_AUTO.
* The mode is stored as an IntParam so it can also be modified by
* ReadConfigFile or SetVariable("tessedit_pageseg_mode", mode as string).
*/
void TessBaseAPI::SetPageSegMode(PageSegMode mode) {
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
}
tesseract_->tessedit_pageseg_mode.set_value(mode);
}
/** Return the current page segmentation mode. */
PageSegMode TessBaseAPI::GetPageSegMode() const {
if (tesseract_ == nullptr) {
return PSM_SINGLE_BLOCK;
}
return static_cast<PageSegMode>(static_cast<int>(tesseract_->tessedit_pageseg_mode));
}
/**
* Recognize a rectangle from an image and return the result as a string.
* May be called many times for a single Init.
* Currently has no error checking.
* Greyscale of 8 and color of 24 or 32 bits per pixel may be given.
* Palette color images will not work properly and must be converted to
* 24 bit.
* Binary images of 1 bit per pixel may also be given but they must be
* byte packed with the MSB of the first byte being the first pixel, and a
* one pixel is WHITE. For binary images set bytes_per_pixel=0.
* The recognized text is returned as a char* which is coded
* as UTF8 and must be freed with the delete [] operator.
*/
char *TessBaseAPI::TesseractRect(const unsigned char *imagedata, int bytes_per_pixel,
int bytes_per_line, int left, int top, int width, int height) {
<|fim_suffix|>
// Since this original api didn't give the exact size of the image,
// we have to invent a reasonable value.
int bits_per_pixel = bytes_per_pixel == 0 ? 1 : bytes_per_pixel * 8;
SetImage(imagedata, bytes_per_line * 8 / bits_per_pixel, height + top, bytes_per_pixel,
bytes_per_line);
SetRectangle(left, top, width, height);
return GetUTF8Text();
}
#ifndef DISABLED_LEGACY_ENGINE
/**
* Call between pages or documents etc to free up memory and forget
* adaptive data.
*/
void TessBaseAPI::ClearAdaptiveClassifier() {
if (tesseract_ == nullptr) {
return;
}
tesseract_->ResetAdaptiveClassifier();
tesseract_->ResetDocumentDictionary();
}
#endif // ndef DISABLED_LEGACY_ENGINE
/**
* Provide an image for Tesseract to recognize. Format is as
* TesseractRect above. Copies the image buffer and converts to Pix.
* SetImage clears all recognition results, and sets the rectangle to the
* full image, so it may be followed immediately by a GetUTF8Text, and it
* will automatically perform recognition.
*/
void TessBaseAPI::SetImage(const unsigned char *imagedata, int width, int height,
int bytes_per_pixel, int bytes_per_line) {
if (InternalSetImage()) {
thresholder_->SetImage(imagedata, width, height, bytes_per_pixel, bytes_per_line);
SetInputImage(thresholder_->GetPixRect());
}
}
void TessBaseAPI::SetSourceResolution(int ppi) {
if (thresholder_) {
thresholder_->SetSourceYResolution(ppi);
} else {
tprintf("Please call SetImage before SetSourceResolution.\n");
}
}
/**
* Provide an image for Tesseract to recognize. As with SetImage above,
* Tesseract takes its own copy of the image, so it need not persist until
* after Recognize.
* Pix vs raw, which to use?
* Use Pix where possible. Tesseract uses Pix as its internal representation
* and it is therefore more efficient to provide a Pix directly.
*/
void TessBaseAPI::SetImage(Pix *pix) {
if (InternalSetImage()) {
if (pixGetSpp(pix<|fim_middle|>if (tesseract_ == nullptr || width < kMinRectSize || height < kMinRectSize) {
return nullptr; // Nothing worth doing.
}
|
a set of parameter name, value pairs.
* Searches the standard places: tessdata/configs, tessdata/tessconfigs
* and also accepts a relative or absolute path name.
*/
void TessBaseAPI::ReadConfigFile(const char *filename) {
tesseract_->read_config_file(filename, SET_PARAM_CONSTRAINT_NON_INIT_ONLY);
}
/** Same as above, but only set debug params from the given config file. */
void TessBaseAPI::ReadDebugConfigFile(const char *filename) {
tesseract_->read_config_file(filename, SET_PARAM_CONSTRAINT_DEBUG_ONLY);
}
/**
* Set the current page segmentation mode. Defaults to PSM_AUTO.
* The mode is stored as an IntParam so it can also be modified by
* ReadConfigFile or SetVariable("tessedit_pageseg_mode", mode as string).
*/
void TessBaseAPI::SetPageSegMode(PageSegMode mode) {
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
}
tesseract_->tessedit_pageseg_mode.set_value(mode);
}
/** Return the current page segmentation mode. */
PageSegMode TessBaseAPI::GetPageSegMode() const {
if (tesseract_ == nullptr) {
return PSM_SINGLE_BLOCK;
}
return static_cast<PageSegMode>(static_cast<int>(tesseract_->tessedit_pageseg_mode));
}
/**
* Recognize a rectangle from an image and return the result as a string.
* May be called many times for a single Init.
* Currently has no error checking.
* Greyscale of 8 and color of 24 or 32 bits per pixel may be given.
* Palette color images will not work properly and must be converted to
* 24 bit.
* Binary images of 1 bit per pixel may also be given but they must be
* byte packed with the MSB of the first byte being the first pixel, and a
* one pixel is WHITE. For binary images set bytes_per_pixel=0.
* The recognized text is returned as a char* which is coded
* as UTF8 and must be freed with the delete [] operator.
*/
char *TessBaseAPI::TesseractRect(const unsigned char *imagedata, int bytes_per_pixel,
int bytes_per_line, int left, int top, int width, int height) {
|
if (tesseract_ == nullptr || width < kMinRectSize || height < kMinRectSize) {
return nullptr; // Nothing worth doing.
}
|
// Since this original api didn't give the exact size of the image,
// we have to invent a reasonable value.
int bits_per_pixel = bytes_per_pixel == 0 ? 1 : bytes_per_pixel * 8;
SetImage(imagedata, bytes_per_line * 8 / bits_per_pixel, height + top, bytes_per_pixel,
bytes_per_line);
SetRectangle(left, top, width, height);
return GetUTF8Text();
}
#ifndef DISABLED_LEGACY_ENGINE
/**
* Call between pages or documents etc to free up memory and forget
* adaptive data.
*/
void TessBaseAPI::ClearAdaptiveClassifier() {
if (tesseract_ == nullptr) {
return;
}
tesseract_->ResetAdaptiveClassifier();
tesseract_->ResetDocumentDictionary();
}
#endif // ndef DISABLED_LEGACY_ENGINE
/**
* Provide an image for Tesseract to recognize. Format is as
* TesseractRect above. Copies the image buffer and converts to Pix.
* SetImage clears all recognition results, and sets the rectangle to the
* full image, so it may be followed immediately by a GetUTF8Text, and it
* will automatically perform recognition.
*/
void TessBaseAPI::SetImage(const unsigned char *imagedata, int width, int height,
int bytes_per_pixel, int bytes_per_line) {
if (InternalSetImage()) {
thresholder_->SetImage(imagedata, width, height, bytes_per_pixel, bytes_per_line);
SetInputImage(thresholder_->GetPixRect());
}
}
void TessBaseAPI::SetSourceResolution(int ppi) {
if (thresholder_) {
thresholder_->SetSourceYResolution(ppi);
} else {
tprintf("Please call SetImage before SetSourceResolution.\n");
}
}
/**
* Provide an image for Tesseract to recognize. As with SetImage above,
* Tesseract takes its own copy of the image, so it need not persist until
* after Recognize.
* Pix vs raw, which to use?
* Use Pix where possible. Tesseract uses Pix as its internal representation
* and it is therefore more efficient to provide a Pix directly.
*/
void TessBaseAPI::SetImage(Pix *pix) {
if (InternalSetImage()) {
if (pixGetSpp(pix
|
ast_based
|
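The TesseractRect() documentation above pins down the raw-buffer layout and the ownership of the returned string. A small sketch of that contract for an 8-bit greyscale buffer; the include path assumes a standard Tesseract install, and the API object is assumed to have been initialised elsewhere (Init is not shown in this excerpt):

#include <tesseract/baseapi.h>
#include <vector>

void recognize_region(tesseract::TessBaseAPI &api) {
    const int width = 640, height = 480;
    const int bytes_per_pixel = 1;                          // 8-bit greyscale
    const int bytes_per_line = width * bytes_per_pixel;
    std::vector<unsigned char> image(static_cast<size_t>(bytes_per_line) * height, 255);

    // Recognize a 100x50 rectangle at (10, 20). The result is UTF-8, owned by
    // the caller, and must be released with delete [] as documented above.
    char *text = api.TesseractRect(image.data(), bytes_per_pixel, bytes_per_line,
                                   10, 20, 100, 50);
    if (text != nullptr) {
        // ... use the text ...
        delete [] text;
    }
}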
<|fim_prefix|>/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "animated_sprite_2d.h"
#include "scene/main/viewport.h"
#ifdef TOOLS_ENABLED
Dictionary AnimatedSprite2D::_edit_get_state() const {
Dictionary state = Node2D::_edit_get_state();
state["offset"] = offset;
return state;
}
void AnimatedSprite2D::_edit_set_state(const Dictionary &p_state) {
Node2D::_edit_set_state(p_state);
set_offset(p_state["offset"]);
}
void AnimatedSprite2D::_edit_set_pivot(const Point2 &p_pivot) {
set_offset(get_offset() - p_pivot);
set_position(get_transform().xform(p_pivot));
}
Point2 AnimatedSprite2D::_edit_get_pivot() const {
return Vector2();
}
bool AnimatedSprite2D::_edit_use_pivot() const {
return true;
}
#endif // TOOLS_ENABLED
#ifdef DEBUG_ENABLED
Rect2 AnimatedSprite2D::_edit_get_rect() const {
return _get_rect();
}
bool AnimatedSprite2D::_edit_use_rect() const {
if (frames.is_null() || !frames->has_animation(animation)) {
return false;
}
if (frame < 0 || frame >= frames->get_frame_count(animation)) {
return false;
}
Ref<Texture2D> t;
if (animation) {
t = frames->get_frame_texture(animation, frame);
}
return t.is_valid();
}
#endif // DEBUG_ENABLED
Rect2 AnimatedSprite2D::get_anchorable_rect() const {
return _get_rect();
}
Rect2 AnimatedSprite2D::_get_rect() const {
if (frames.is_null() || !frames->has_animation(animation)) {
return Rect2();
}
if (frame < 0 || frame >= frames->get_frame_count(animation)) {
return Rect2();
}
Ref<Texture2D> t;
if (animation) {
t = frames->get_frame_texture(animation, frame);<|fim_suffix|> Size2 s = t->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (s == Size2(0, 0)) {
s = Size2(1, 1);
}
return Rect2(ofs, s);
}
void AnimatedSprite2D::_validate_property(PropertyInfo &p_property) const {
if (frames.is_null()) {
return;
}
if (!Engine::get_singleton()->is_editor_hint()) {
if (p_property.name == "frame" && playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
}
return;
}
if (p_property.name == "animation") {
List<StringName> names;
frames->get_animation_list(&names);
names.sort_custom<StringName::AlphCompare>();
bool current_found = false;
bool is_first_element = true;
for (const StringName &E : names) {
if (!is_first_element) {
p_property.hint_string += ",";
} else {
is_first_element = false;
}
p_property.hint_string += String(E);
if (animation == E) {
current_found = true;
}
}
if (!current_found) {
if (p_property.hint_string.is_empty()) {
p_property.hint_string = String(animation);
} else {
p_property.hint_string = String(animation) + "," + p_property.hint_string;
}
}
return;
}
if (p_property.name == "frame") {
if (playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
return;
}
p_property.hint = PROPERTY_HINT_RANGE;
if (frames->has_animation(animation) && frames->get_frame_count(animation) > 0) {
p_property.hint_string = "0," + itos(frames->get_frame_count(animation) - 1) + ",1";
} else {
// Avoid an error, `hint_string` is required for `PROPERTY_HINT_RANGE`.
p_property.hint_string = "0,0,1";
}
p_property.usage |= PROPERTY_USAGE_KEYING_INCREMENTS;
}
}
void AnimatedSprite2D::_notification(int p_what) {
switch (p_what) {
case NOTIFICATION_ACCESSIBILITY_UPDATE: {
RID ae = get_accessibility_element();
ERR_FAIL_COND(ae.is_null());
Rect2 dst_rect = _get_rect();
<|fim_middle|> }
if (t.is_null()) {
return Rect2();
}
|
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "animated_sprite_2d.h"
#include "scene/main/viewport.h"
#ifdef TOOLS_ENABLED
Dictionary AnimatedSprite2D::_edit_get_state() const {
Dictionary state = Node2D::_edit_get_state();
state["offset"] = offset;
return state;
}
void AnimatedSprite2D::_edit_set_state(const Dictionary &p_state) {
Node2D::_edit_set_state(p_state);
set_offset(p_state["offset"]);
}
void AnimatedSprite2D::_edit_set_pivot(const Point2 &p_pivot) {
set_offset(get_offset() - p_pivot);
set_position(get_transform().xform(p_pivot));
}
Point2 AnimatedSprite2D::_edit_get_pivot() const {
return Vector2();
}
bool AnimatedSprite2D::_edit_use_pivot() const {
return true;
}
#endif // TOOLS_ENABLED
#ifdef DEBUG_ENABLED
Rect2 AnimatedSprite2D::_edit_get_rect() const {
return _get_rect();
}
bool AnimatedSprite2D::_edit_use_rect() const {
if (frames.is_null() || !frames->has_animation(animation)) {
return false;
}
if (frame < 0 || frame >= frames->get_frame_count(animation)) {
return false;
}
Ref<Texture2D> t;
if (animation) {
t = frames->get_frame_texture(animation, frame);
}
return t.is_valid();
}
#endif // DEBUG_ENABLED
Rect2 AnimatedSprite2D::get_anchorable_rect() const {
return _get_rect();
}
Rect2 AnimatedSprite2D::_get_rect() const {
if (frames.is_null() || !frames->has_animation(animation)) {
return Rect2();
}
if (frame < 0 || frame >= frames->get_frame_count(animation)) {
return Rect2();
}
Ref<Texture2D> t;
if (animation) {
t = frames->get_frame_texture(animation, frame);
|
}
if (t.is_null()) {
return Rect2();
}
|
Size2 s = t->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (s == Size2(0, 0)) {
s = Size2(1, 1);
}
return Rect2(ofs, s);
}
void AnimatedSprite2D::_validate_property(PropertyInfo &p_property) const {
if (frames.is_null()) {
return;
}
if (!Engine::get_singleton()->is_editor_hint()) {
if (p_property.name == "frame" && playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
}
return;
}
if (p_property.name == "animation") {
List<StringName> names;
frames->get_animation_list(&names);
names.sort_custom<StringName::AlphCompare>();
bool current_found = false;
bool is_first_element = true;
for (const StringName &E : names) {
if (!is_first_element) {
p_property.hint_string += ",";
} else {
is_first_element = false;
}
p_property.hint_string += String(E);
if (animation == E) {
current_found = true;
}
}
if (!current_found) {
if (p_property.hint_string.is_empty()) {
p_property.hint_string = String(animation);
} else {
p_property.hint_string = String(animation) + "," + p_property.hint_string;
}
}
return;
}
if (p_property.name == "frame") {
if (playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
return;
}
p_property.hint = PROPERTY_HINT_RANGE;
if (frames->has_animation(animation) && frames->get_frame_count(animation) > 0) {
p_property.hint_string = "0," + itos(frames->get_frame_count(animation) - 1) + ",1";
} else {
// Avoid an error, `hint_string` is required for `PROPERTY_HINT_RANGE`.
p_property.hint_string = "0,0,1";
}
p_property.usage |= PROPERTY_USAGE_KEYING_INCREMENTS;
}
}
void AnimatedSprite2D::_notification(int p_what) {
switch (p_what) {
case NOTIFICATION_ACCESSIBILITY_UPDATE: {
RID ae = get_accessibility_element();
ERR_FAIL_COND(ae.is_null());
Rect2 dst_rect = _get_rect();
|
random
|
<|fim_prefix|>eal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "animation_bezier_editor.h"
#include "editor/animation/animation_player_editor_plugin.h"
#include "editor/editor_node.h"
#include "editor/editor_string_names.h"
#include "editor/editor_undo_redo_manager.h"
#include "editor/gui/editor_spin_slider.h"
#include "editor/settings/editor_settings.h"
#include "editor/themes/editor_scale.h"
#include "scene/gui/option_button.h"
#include "scene/gui/view_panner.h"
#include "scene/resources/text_line.h"
#include <climits>
float AnimationBezierTrackEdit::_bezier_h_to_pixel(float p_h) {
float h = p_h;
h = (h - timeline_v_scroll) / timeline_v_zoom;
h = (get_size().height / 2.0) - h;
return h;
}
void AnimationBezierTrackEdit::_draw_track(int p_track, <|fim_suffix|>) {
float scale = timeline->get_zoom_scale();
int limit = timeline->get_name_limit();
int right_limit = get_size().width;
// Selection may have altered the order of keys.
RBMap<real_t, int> key_order;
for (int i = 0; i < animation->track_get_key_count(p_track); i++) {
real_t ofs = animation->track_get_key_time(p_track, i);
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
ofs += moving_selection_offset.x;
} else if (scaling_selection) {
ofs += -scaling_selection_offset.x + (ofs - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
}
}
key_order[ofs] = i;
}
for (RBMap<real_t, int>::Element *E = key_order.front(); E; E = E->next()) {
int i = E->get();
if (!E->next()) {
break;
}
int i_n = E->next()->get();
float offset = animation->track_get_key_time(p_track, i);
float height = animation->bezier_track_get_key_value(p_track, i);
Vector2 out_handle = animation->bezier_track_get_key_out_handle(p_track, i);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i) {
out_handle = moving_handle_right;
}
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
offset += moving_selection_offset.x;
height += moving_selection_offset.y;
} else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
height += -scaling_selection_offset.y + (height - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
float offset_n = animation->track_get_key_time(p_track, i_n);
float height_n = animation->bezier_track_get_key_value(p_track, i_n);
Vector2 in_handle = animation->bezier_track_get_key_in_handle(p_track, i_n);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i_n) {
in_handle = moving_handle_left;
}
if (selection.has(IntPair(p_track, i_n))) {
<|fim_middle|>const Color &p_color
|
eal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "animation_bezier_editor.h"
#include "editor/animation/animation_player_editor_plugin.h"
#include "editor/editor_node.h"
#include "editor/editor_string_names.h"
#include "editor/editor_undo_redo_manager.h"
#include "editor/gui/editor_spin_slider.h"
#include "editor/settings/editor_settings.h"
#include "editor/themes/editor_scale.h"
#include "scene/gui/option_button.h"
#include "scene/gui/view_panner.h"
#include "scene/resources/text_line.h"
#include <climits>
float AnimationBezierTrackEdit::_bezier_h_to_pixel(float p_h) {
float h = p_h;
h = (h - timeline_v_scroll) / timeline_v_zoom;
h = (get_size().height / 2.0) - h;
return h;
}
void AnimationBezierTrackEdit::_draw_track(int p_track,
|
const Color &p_color
|
) {
float scale = timeline->get_zoom_scale();
int limit = timeline->get_name_limit();
int right_limit = get_size().width;
// Selection may have altered the order of keys.
RBMap<real_t, int> key_order;
for (int i = 0; i < animation->track_get_key_count(p_track); i++) {
real_t ofs = animation->track_get_key_time(p_track, i);
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
ofs += moving_selection_offset.x;
} else if (scaling_selection) {
ofs += -scaling_selection_offset.x + (ofs - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
}
}
key_order[ofs] = i;
}
for (RBMap<real_t, int>::Element *E = key_order.front(); E; E = E->next()) {
int i = E->get();
if (!E->next()) {
break;
}
int i_n = E->next()->get();
float offset = animation->track_get_key_time(p_track, i);
float height = animation->bezier_track_get_key_value(p_track, i);
Vector2 out_handle = animation->bezier_track_get_key_out_handle(p_track, i);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i) {
out_handle = moving_handle_right;
}
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
offset += moving_selection_offset.x;
height += moving_selection_offset.y;
} else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
height += -scaling_selection_offset.y + (height - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
float offset_n = animation->track_get_key_time(p_track, i_n);
float height_n = animation->bezier_track_get_key_value(p_track, i_n);
Vector2 in_handle = animation->bezier_track_get_key_in_handle(p_track, i_n);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i_n) {
in_handle = moving_handle_left;
}
if (selection.has(IntPair(p_track, i_n))) {
|
ast_based
|
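_bezier_h_to_pixel() above maps a curve value to a panel y coordinate by removing the vertical scroll, dividing by the vertical zoom and mirroring around the panel's vertical centre. The same mapping in isolation, with arbitrary sample numbers:

#include <cassert>

// pixel_y = panel_height / 2 - (value - v_scroll) / v_zoom,
// so larger curve values end up higher (smaller y) on screen.
static float bezier_value_to_pixel(float value, float v_scroll, float v_zoom, float panel_height) {
    return panel_height / 2.0f - (value - v_scroll) / v_zoom;
}

int main() {
    assert(bezier_value_to_pixel(0.0f, 0.0f, 1.0f, 400.0f) == 200.0f);   // value 0 sits at the centre
    assert(bezier_value_to_pixel(50.0f, 0.0f, 1.0f, 400.0f) == 150.0f);  // value 50 sits 50 px above it
    return 0;
}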
<|fim_prefix|> } else {
_clear_selection(); // Clicked and nothing happened, so clear the selection.
// Select by clicking on curve.
int track_count = animation->get_track_count();
real_t animation_length = animation->get_length();
animation->set_length(real_t(INT_MAX)); // bezier_track_interpolate doesn't find keys if they exist beyond anim length.
real_t time = ((mb->get_position().x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
for (int i = 0; i < track_count; ++i) {
if (animation->track_get_type(i) != Animation::TrackType::TYPE_BEZIER || hidden_tracks.has(i) || locked_tracks.has(i)) {
continue;
}
float track_h = animation->bezier_track_interpolate(i, time);
float track_height = _bezier_h_to_pixel(track_h);
if (std::abs(mb->get_position().y - track_height) < 10) {
set_animation_and_track(animation, i, read_only);
break;
}
}
animation->set_length(animation_length);
}
box_selecting_attempt = false;
box_selecting = false;
queue_redraw();
}
if (moving_selection_attempt && mb.is_valid() && !mb->is_pressed() && mb->get_button_index() == MouseButton::LEFT) {
if (!read_only) {
if (moving_selection && (std::abs(moving_selection_offset.x) > CMP_EPSILON || std::abs(moving_selection_offset.y) > CMP_EPSILON)) {
// Commit it.
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Move Bezier Points"));
List<AnimMoveRestore> to_restore;
List<Animation::HandleMode> to_restore_handle_modes;
// 1 - Remove the keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_do_method(animation.ptr(), "track_remove_key", E->get().first, E->get().second);
}
// 2 - Remove overlapped keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t newtime = animation->track_get_key_time(E->get().first, E->get().second) + moving_selection_offset.x;
<|fim_suffix|> continue; // Already in selection, don't save.
}
undo_redo->add_do_method(animation.ptr(), "track_remove_key_at_time", E->get().first, newtime);
AnimMoveRestore amr;
amr.key = animation->track_get_key_value(E->get().first, idx);
amr.track = E->get().first;
amr.time = newtime;
to_restore.push_back(amr);
to_restore_handle_modes.push_back(animation->bezier_track_get_key_handle_mode(E->get().first, idx));
}
// 3 - Move the keys (re-insert them).
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second) + moving_selection_offset.x;
Array key = animation->track_get_key_value(E->get().first, E->get().second);
real_t h = key[0];
h += moving_selection_offset.y;
key[0] = h;
Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(E->get().first, E->get().second);
Animation::HandleSetMode handle_set_mode = Animation::HANDLE_SET_MODE_NONE;
if (moving_inserted_key) {
handle_mode = (Animation::HandleMode)editor->bezier_key_mode->get_selected_id();
handle_set_mode = Animation::HANDLE_SET_MODE_AUTO;
}
undo_redo->add_do_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
E->get().first,
newpos,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
handle_mode,
handle_set_mode);
}
// 4 - (undo) Remove inserted keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second) + moving_selection_offset.x;
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, newpos);
}
// 5 - (undo) Reinsert keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {<|fim_middle|> int idx = animation->track_find_key(E->get().first, newtime, Animation::FIND_MODE_APPROX);
if (idx == -1) {
continue;
}
if (selection.has(IntPair(E->get().first, idx))) {
|
} else {
_clear_selection(); // Clicked and nothing happened, so clear the selection.
// Select by clicking on curve.
int track_count = animation->get_track_count();
real_t animation_length = animation->get_length();
animation->set_length(real_t(INT_MAX)); // bezier_track_interpolate doesn't find keys if they exist beyond anim length.
real_t time = ((mb->get_position().x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
for (int i = 0; i < track_count; ++i) {
if (animation->track_get_type(i) != Animation::TrackType::TYPE_BEZIER || hidden_tracks.has(i) || locked_tracks.has(i)) {
continue;
}
float track_h = animation->bezier_track_interpolate(i, time);
float track_height = _bezier_h_to_pixel(track_h);
if (std::abs(mb->get_position().y - track_height) < 10) {
set_animation_and_track(animation, i, read_only);
break;
}
}
animation->set_length(animation_length);
}
box_selecting_attempt = false;
box_selecting = false;
queue_redraw();
}
if (moving_selection_attempt && mb.is_valid() && !mb->is_pressed() && mb->get_button_index() == MouseButton::LEFT) {
if (!read_only) {
if (moving_selection && (std::abs(moving_selection_offset.x) > CMP_EPSILON || std::abs(moving_selection_offset.y) > CMP_EPSILON)) {
// Commit it.
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Move Bezier Points"));
List<AnimMoveRestore> to_restore;
List<Animation::HandleMode> to_restore_handle_modes;
// 1 - Remove the keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_do_method(animation.ptr(), "track_remove_key", E->get().first, E->get().second);
}
// 2 - Remove overlapped keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t newtime = animation->track_get_key_time(E->get().first, E->get().second) + moving_selection_offset.x;
|
int idx = animation->track_find_key(E->get().first, newtime, Animation::FIND_MODE_APPROX);
if (idx == -1) {
continue;
}
if (selection.has(IntPair(E->get().first, idx))) {
|
continue; // Already in selection, don't save.
}
undo_redo->add_do_method(animation.ptr(), "track_remove_key_at_time", E->get().first, newtime);
AnimMoveRestore amr;
amr.key = animation->track_get_key_value(E->get().first, idx);
amr.track = E->get().first;
amr.time = newtime;
to_restore.push_back(amr);
to_restore_handle_modes.push_back(animation->bezier_track_get_key_handle_mode(E->get().first, idx));
}
// 3 - Move the keys (re-insert them).
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second) + moving_selection_offset.x;
Array key = animation->track_get_key_value(E->get().first, E->get().second);
real_t h = key[0];
h += moving_selection_offset.y;
key[0] = h;
Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(E->get().first, E->get().second);
Animation::HandleSetMode handle_set_mode = Animation::HANDLE_SET_MODE_NONE;
if (moving_inserted_key) {
handle_mode = (Animation::HandleMode)editor->bezier_key_mode->get_selected_id();
handle_set_mode = Animation::HANDLE_SET_MODE_AUTO;
}
undo_redo->add_do_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
E->get().first,
newpos,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
handle_mode,
handle_set_mode);
}
// 4 - (undo) Remove inserted keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second) + moving_selection_offset.x;
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, newpos);
}
// 5 - (undo) Reinsert keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
|
random
|
<|fim_prefix|> parametersWriter << "distortion_coefficients_std_dev" << mCalibData->stdDeviations.rowRange(cv::Range(4, 9));
parametersWriter << "avg_reprojection_error" << mCalibData->totalAvgErr;
parametersWriter.release();
success = true;
}
}
return success;
}
void calib::calibDataController::printParametersToConsole(std::ostream &output) const
{
const char* border = "---------------------------------------------------";
output << border << std::endl;
output << "Frames used for calibration: " << std::max(mCalibData->objectPoints.size(), mCalibData->allCharucoCorners.size())
<< " \t RMS = " << mCalibData->totalAvgErr << std::endl;
if(mCalibData->cameraMatrix.at<double>(0,0) == mCalibData->cameraMatrix.at<double>(1,1))
output << "F = " << mCalibData->cameraMatrix.at<double>(1,1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
else
output << "Fx = " << mCalibData->cameraMatrix.at<double>(0,0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(0) << " \t "
<< "Fy = " << mCalibData->cameraMatrix.at<double>(1,1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
output << "Cx = " << mCalibData->cameraMatrix.at<double>(0,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(2) << " \t"
<< "Cy = " << mCalibData->cameraMatrix.at<double>(1,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(3) << std::endl;
output << "K1 = " << mCalibData->distCoeffs.at<double>(0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(4) << std::endl;
output << "K2 = " << mCalibData->distCoeffs.at<double>(1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(5) << std::endl;
output << "K3 = " << mCalibData->distCoeffs.at<double>(4) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(8) << std::endl;<|fim_suffix|>}
void calib::calibDataController::updateUndistortMap()
{
cv::initUndistortRectifyMap(mCalibData->cameraMatrix, mCalibData->distCoeffs, cv::noArray(),
cv::getOptimalNewCameraMatrix(mCalibData->cameraMatrix, mCalibData->distCoeffs, mCalibData->imageSize, 0.0, mCalibData->imageSize),
mCalibData->imageSize, CV_16SC2, mCalibData->undistMap1, mCalibData->undistMap2);
}
<|fim_middle|> output << "TD1 = " << mCalibData->distCoeffs.at<double>(2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(6) << std::endl;
output << "TD2 = " << mCalibData->distCoeffs.at<double>(3) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(7) << std::endl;
|
parametersWriter << "distortion_coefficients_std_dev" << mCalibData->stdDeviations.rowRange(cv::Range(4, 9));
parametersWriter << "avg_reprojection_error" << mCalibData->totalAvgErr;
parametersWriter.release();
success = true;
}
}
return success;
}
void calib::calibDataController::printParametersToConsole(std::ostream &output) const
{
const char* border = "---------------------------------------------------";
output << border << std::endl;
output << "Frames used for calibration: " << std::max(mCalibData->objectPoints.size(), mCalibData->allCharucoCorners.size())
<< " \t RMS = " << mCalibData->totalAvgErr << std::endl;
if(mCalibData->cameraMatrix.at<double>(0,0) == mCalibData->cameraMatrix.at<double>(1,1))
output << "F = " << mCalibData->cameraMatrix.at<double>(1,1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
else
output << "Fx = " << mCalibData->cameraMatrix.at<double>(0,0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(0) << " \t "
<< "Fy = " << mCalibData->cameraMatrix.at<double>(1,1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
output << "Cx = " << mCalibData->cameraMatrix.at<double>(0,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(2) << " \t"
<< "Cy = " << mCalibData->cameraMatrix.at<double>(1,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(3) << std::endl;
output << "K1 = " << mCalibData->distCoeffs.at<double>(0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(4) << std::endl;
output << "K2 = " << mCalibData->distCoeffs.at<double>(1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(5) << std::endl;
output << "K3 = " << mCalibData->distCoeffs.at<double>(4) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(8) << std::endl;
|
output << "TD1 = " << mCalibData->distCoeffs.at<double>(2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(6) << std::endl;
output << "TD2 = " << mCalibData->distCoeffs.at<double>(3) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(7) << std::endl;
|
}
void calib::calibDataController::updateUndistortMap()
{
cv::initUndistortRectifyMap(mCalibData->cameraMatrix, mCalibData->distCoeffs, cv::noArray(),
cv::getOptimalNewCameraMatrix(mCalibData->cameraMatrix, mCalibData->distCoeffs, mCalibData->imageSize, 0.0, mCalibData->imageSize),
mCalibData->imageSize, CV_16SC2, mCalibData->undistMap1, mCalibData->undistMap2);
}
|
random
|
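Each line printed above pairs an estimate with a +- sigmaMult standard-deviation band. A standalone restatement of that output format; the value of sigmaMult is defined elsewhere in the tool, so 3.0 below is only an assumption, and the sample numbers are invented:

#include <iostream>

int main() {
    const double sigmaMult = 3.0;            // assumed; the real constant lives outside this excerpt
    const double fx = 812.4, fxStdDev = 1.7; // invented sample estimate and uncertainty
    // Mirrors the "Fx = <value> +- sigmaMult * stddev" lines above.
    std::cout << "Fx = " << fx << " +- " << sigmaMult * fxStdDev << std::endl;  // prints: Fx = 812.4 +- 5.1
    return 0;
}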
<|fim_prefix|>
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_position_in_set(ae->node, p_index);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_level(const RID &p_id, int p_level) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_level(ae->node, p_level);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_selected(const RID &p_id, bool p_selected) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_selected(ae->node, p_selected);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_expanded(const RID &p_id, bool p_expanded) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_expanded(ae->node, p_expanded);
}
void AccessibilityDriverAccessKit::accessibility_update_set_popup_type(const RID &p_id, DisplayServer::AccessibilityPopupType p_popup) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
switch (p_popup) {
case DisplayServer::AccessibilityPopupType::POPUP_MENU: {<|fim_suffix|> } break;
case DisplayServer::AccessibilityPopupType::POPUP_LIST: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_LISTBOX);
} break;
case DisplayServer::AccessibilityPopupType::POPUP_TREE: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_TREE);
} break;
case DisplayServer::AccessibilityPopupType::POPUP_DIALOG: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_DIALOG);
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_checked(const RID &p_id, bool p_checked) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
	if (p_checked) {
accesskit_node_set_toggled(ae->node, ACCESSKIT_TOGGLED_TRUE);
} else {
accesskit_node_set_toggled(ae->node, ACCESSKIT_TOGGLED_FALSE);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_num_value(const RID &p_id, double p_position) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_numeric_value(ae->node, p_position);
}
void AccessibilityDriverAccessKit::accessibility_update_set_num_range(const RID &p_id, double p_min, double p_max) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_min_numeric_value(ae->node, p_min);
accesskit_node_set_max_numeric_value(ae->node, p_max);
}
void AccessibilityDriverAccessKit::accessibility_update_set_num_step(const RID &p_id, double p_step) {<|fim_middle|> accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_MENU);
|
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_position_in_set(ae->node, p_index);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_level(const RID &p_id, int p_level) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_level(ae->node, p_level);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_selected(const RID &p_id, bool p_selected) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_selected(ae->node, p_selected);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_expanded(const RID &p_id, bool p_expanded) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_expanded(ae->node, p_expanded);
}
void AccessibilityDriverAccessKit::accessibility_update_set_popup_type(const RID &p_id, DisplayServer::AccessibilityPopupType p_popup) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
switch (p_popup) {
case DisplayServer::AccessibilityPopupType::POPUP_MENU: {
|
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_MENU);
|
} break;
case DisplayServer::AccessibilityPopupType::POPUP_LIST: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_LISTBOX);
} break;
case DisplayServer::AccessibilityPopupType::POPUP_TREE: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_TREE);
} break;
case DisplayServer::AccessibilityPopupType::POPUP_DIALOG: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_DIALOG);
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_checked(const RID &p_id, bool p_checked) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (p_checked) {
accesskit_node_set_toggled(ae->node, ACCESSKIT_TOGGLED_TRUE);
} else {
accesskit_node_set_toggled(ae->node, ACCESSKIT_TOGGLED_FALSE);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_num_value(const RID &p_id, double p_position) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_numeric_value(ae->node, p_position);
}
void AccessibilityDriverAccessKit::accessibility_update_set_num_range(const RID &p_id, double p_min, double p_max) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_min_numeric_value(ae->node, p_min);
accesskit_node_set_max_numeric_value(ae->node, p_max);
}
void AccessibilityDriverAccessKit::accessibility_update_set_num_step(const RID &p_id, double p_step) {
|
random
|
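All of the accessibility_update_set_* entry points in the row above repeat the same guard, lookup, and node-ensure sequence before setting a single AccessKit property. A condensed sketch of that shared prologue, reusing only the member names visible in the snippet (in_accessibility_update, rid_owner, _ensure_node); this is an illustration, not a refactor present in the original driver:
// shared prologue of every accessibility_update_set_* setter:
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
// followed by exactly one accesskit_node_set_*() call per setter, e.g.:
accesskit_node_set_level(ae->node, p_level);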
<|fim_prefix|>guf file %s\n", __func__, filename);
struct ggml_context * ctx_data = NULL;
struct gguf_init_params params = {
/*.no_alloc = */ false,
/*.ctx = */ &ctx_data,
};
struct gguf_context * ctx = gguf_init_from_file(filename, params);
GGML_ASSERT(ctx != NULL);
const int model_idx = gguf_find_key(ctx, KV_TOKENIZER_MODEL);
GGML_ASSERT(model_idx >= 0);
std::string tokenizer_name = gguf_get_val_str(ctx, model_idx);
GGML_ASSERT(tokenizer_name == TOKENIZER_NAME);
const int token_idx = gguf_find_key(ctx, KV_TOKENIZER_LIST);
GGML_ASSERT(token_idx >= 0);
const int score_idx = gguf_find_key(ctx, KV_TOKENIZER_SCORES);
GGML_ASSERT(score_idx >= 0);
const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
const int toktype_idx = gguf_find_key(ctx, KV_TOKENIZER_TOKEN_TYPE);
GGML_ASSERT(toktype_idx >= 0);
const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
if (n_vocab != static_cast<uint32_t>(config->vocab_size)) {
die_fmt("vocab size mismatch: (gguf) %u != (llama2c) %d", n_vocab, config->vocab_size);
}
vocab->id_to_token.resize(n_vocab);
for (uint32_t i = 0; i < n_vocab; i++) {
std::string word = gguf_get_arr_str(ctx, token_idx, i);
vocab->token_to_id[word] = i;
auto & token_data = vocab->id_to_token[i];
token_data.text = std::move(word);
token_data.score = scores[i];
token_data.type = (llama_token_type) toktypes[i];
}
ggml_free(ctx_data);
gguf_free(ctx);
} else {
// assume llama2.c vocabulary
LOG_INF("%s: Assuming llama2.c vocabulary since %s is not a gguf file\n", __func__, filename);
my_llama_file file(filename, "rb");
if (!file.fp) <|fim_suffix|>
const int n_vocab = config->vocab_size;
/* uint32_t max_token_length = */ file.read_u32(); // unused
vocab->id_to_token.resize(n_vocab);
for (my_llama_vocab::id id=0; id<n_vocab; ++id) {
float_t score = file.read_f32();
uint32_t len = file.read_u32();
std::string text = file.read_string(len);
unsigned char byte_val;
my_llama_vocab::ttype type = LLAMA_TOKEN_TYPE_NORMAL;
if (id == UNKNOWN_TOKEN_ID) {
text = "<unk>";
type = LLAMA_TOKEN_TYPE_UNKNOWN;
} else if (id == BOS_TOKEN_ID) {
text = "<s>";
type = LLAMA_TOKEN_TYPE_CONTROL;
} else if (id == EOS_TOKEN_ID) {
text = "</s>";
type = LLAMA_TOKEN_TYPE_CONTROL;
} else if (text.empty()) {
type = LLAMA_TOKEN_TYPE_CONTROL;
} else if (sscanf(text.c_str(), "<0x%02hhX>", &byte_val) == 1) {
// Text of byte tokens is already in the expected format.
type = LLAMA_TOKEN_TYPE_BYTE;
} else {
type = LLAMA_TOKEN_TYPE_NORMAL;
}
text = llama_escape_whitespaces(text);
vocab->id_to_token[id].text = text;
vocab->id_to_token[id].score = score;
vocab->id_to_token[id].type = type;
vocab->token_to_id.emplace(text, id);
}
}
}
static void convert_weights_ak_to_gg(struct ggml_tensor * gg_weights, const float * karpathy_weights) {
int size = 1;
for (int dim = 0; dim < ggml_n_dims(gg_weights); ++dim) {
size *= gg_weights->ne[dim];
}
for (int ct = 0; ct < size; ++ct) {
int64_t i0 = 0; int64_t i1 = 0;
int64_t i2 = 0; int64_t i3 = 0;
ggml_unravel_index(gg_weights, ct, &i0, &i1, &i2, &i3);
ggml_set_f32_nd(gg_weights, i0, i1, i2, i3, karpathy_weights[ct]);
}
}
static void save_as_llama_mode<|fim_middle|>{
die_fmt("%s: %s", strerror(errno), filename);
}
|
guf file %s\n", __func__, filename);
struct ggml_context * ctx_data = NULL;
struct gguf_init_params params = {
/*.no_alloc = */ false,
/*.ctx = */ &ctx_data,
};
struct gguf_context * ctx = gguf_init_from_file(filename, params);
GGML_ASSERT(ctx != NULL);
const int model_idx = gguf_find_key(ctx, KV_TOKENIZER_MODEL);
GGML_ASSERT(model_idx >= 0);
std::string tokenizer_name = gguf_get_val_str(ctx, model_idx);
GGML_ASSERT(tokenizer_name == TOKENIZER_NAME);
const int token_idx = gguf_find_key(ctx, KV_TOKENIZER_LIST);
GGML_ASSERT(token_idx >= 0);
const int score_idx = gguf_find_key(ctx, KV_TOKENIZER_SCORES);
GGML_ASSERT(score_idx >= 0);
const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
const int toktype_idx = gguf_find_key(ctx, KV_TOKENIZER_TOKEN_TYPE);
GGML_ASSERT(toktype_idx >= 0);
const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
if (n_vocab != static_cast<uint32_t>(config->vocab_size)) {
die_fmt("vocab size mismatch: (gguf) %u != (llama2c) %d", n_vocab, config->vocab_size);
}
vocab->id_to_token.resize(n_vocab);
for (uint32_t i = 0; i < n_vocab; i++) {
std::string word = gguf_get_arr_str(ctx, token_idx, i);
vocab->token_to_id[word] = i;
auto & token_data = vocab->id_to_token[i];
token_data.text = std::move(word);
token_data.score = scores[i];
token_data.type = (llama_token_type) toktypes[i];
}
ggml_free(ctx_data);
gguf_free(ctx);
} else {
// assume llama2.c vocabulary
LOG_INF("%s: Assuming llama2.c vocabulary since %s is not a gguf file\n", __func__, filename);
my_llama_file file(filename, "rb");
if (!file.fp)
|
{
die_fmt("%s: %s", strerror(errno), filename);
}
|
const int n_vocab = config->vocab_size;
/* uint32_t max_token_length = */ file.read_u32(); // unused
vocab->id_to_token.resize(n_vocab);
for (my_llama_vocab::id id=0; id<n_vocab; ++id) {
float_t score = file.read_f32();
uint32_t len = file.read_u32();
std::string text = file.read_string(len);
unsigned char byte_val;
my_llama_vocab::ttype type = LLAMA_TOKEN_TYPE_NORMAL;
if (id == UNKNOWN_TOKEN_ID) {
text = "<unk>";
type = LLAMA_TOKEN_TYPE_UNKNOWN;
} else if (id == BOS_TOKEN_ID) {
text = "<s>";
type = LLAMA_TOKEN_TYPE_CONTROL;
} else if (id == EOS_TOKEN_ID) {
text = "</s>";
type = LLAMA_TOKEN_TYPE_CONTROL;
} else if (text.empty()) {
type = LLAMA_TOKEN_TYPE_CONTROL;
} else if (sscanf(text.c_str(), "<0x%02hhX>", &byte_val) == 1) {
// Text of byte tokens is already in the expected format.
type = LLAMA_TOKEN_TYPE_BYTE;
} else {
type = LLAMA_TOKEN_TYPE_NORMAL;
}
text = llama_escape_whitespaces(text);
vocab->id_to_token[id].text = text;
vocab->id_to_token[id].score = score;
vocab->id_to_token[id].type = type;
vocab->token_to_id.emplace(text, id);
}
}
}
static void convert_weights_ak_to_gg(struct ggml_tensor * gg_weights, const float * karpathy_weights) {
int size = 1;
for (int dim = 0; dim < ggml_n_dims(gg_weights); ++dim) {
size *= gg_weights->ne[dim];
}
for (int ct = 0; ct < size; ++ct) {
int64_t i0 = 0; int64_t i1 = 0;
int64_t i2 = 0; int64_t i3 = 0;
ggml_unravel_index(gg_weights, ct, &i0, &i1, &i2, &i3);
ggml_set_f32_nd(gg_weights, i0, i1, i2, i3, karpathy_weights[ct]);
}
}
static void save_as_llama_mode
|
ast_based
|
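The non-gguf branch above reads the llama2.c tokenizer.bin layout: a single u32 max_token_length header followed by one { f32 score, u32 length, length bytes of text } record per vocabulary entry. A self-contained reader for just that layout, written under that assumption (the struct and function names are hypothetical, not from the original source):
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct TokEntry {
    float score;
    std::string text;
};

static std::vector<TokEntry> read_llama2c_tokenizer(const char *path, int n_vocab) {
    std::vector<TokEntry> out;
    FILE *fp = std::fopen(path, "rb");
    if (!fp) {
        return out;
    }
    uint32_t max_token_length = 0;
    std::fread(&max_token_length, sizeof(uint32_t), 1, fp); // header value, unused here as well
    for (int id = 0; id < n_vocab; ++id) {
        TokEntry e;
        uint32_t len = 0;
        std::fread(&e.score, sizeof(float), 1, fp);
        std::fread(&len, sizeof(uint32_t), 1, fp);
        e.text.resize(len);
        if (len > 0) {
            std::fread(&e.text[0], 1, len, fp);
        }
        out.push_back(std::move(e));
    }
    std::fclose(fp);
    return out;
}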
<|fim_prefix|>ize = view.size();
cvtColor(view, viewGray, COLOR_BGR2GRAY);
bool found = findChessboardCorners( view, boardSize, ptvec, CALIB_CB_ADAPTIVE_THRESH );
drawChessboardCorners( view, boardSize, Mat(ptvec), found );
if( found )
{
imgpt[k1][i].resize(ptvec.size());
std::copy(ptvec.begin(), ptvec.end(), imgpt[k1][i].begin());
}
//imshow("view", view);
//int c = waitKey(0) & 255;
//if( c == 27 || c == 'q' || c == 'Q' )
// return -1;
}
}
}
printf("Running calibration ...\n");
run3Calibration(imgpt[0], imgpt[1], imgpt[2], imageSize,
boardSize, squareSize, aspectRatio, flags|CALIB_FIX_K4|CALIB_FIX_K5,
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
cameraMatrix[2], distCoeffs[2],
R12, T12, R13, T13);
fs.open(outputFilename, FileStorage::WRITE);
fs << "cameraMatrix1" << cameraMatrix[0];
fs << "cameraMatrix2" << cameraMatrix[1];
fs << "cameraMatrix3" << cameraMatrix[2];
fs << "distCoeffs1" << distCoeffs[0];
fs << "distCoeffs2" << distCoeffs[1];
fs << "distCoeffs3" << distCoeffs[2];
fs << "R12" << R12;
fs << "T12" << T12;
fs << "R13" << R13;
fs << "T13" << T13;
fs << "imageWidth" << imageSize.width;
fs << "imageHeight" << imageSize.height;
Mat Q;
// step 3: find rectification transforms
double ratio = rectify3Collinear(cameraMatrix[0], distCoeffs[0], cameraMatrix[1],
distCoeffs[1], cameraMatrix[2], distCoeffs[2],
imgpt[0], imgpt[2],
imageSize, R12, T12, R13, T13,
R[0], R[1], R[2], P[0], P[1], P[2], Q, -1.,
imageSize, 0, 0, CALIB_ZERO_DISPARITY);
Mat map1[3], map2[3];
fs << "R1" << R[0];
<|fim_suffix|>
fs << "R3" << R[2];
fs << "P1" << P[0];
fs << "P2" << P[1];
fs << "P3" << P[2];
fs << "disparityRatio" << ratio;
fs.release();
printf("Disparity ratio = %g\n", ratio);
for( k = 0; k < 3; k++ )
initUndistortRectifyMap(cameraMatrix[k], distCoeffs[k], R[k], P[k], imageSize, CV_16SC2, map1[k], map2[k]);
Mat canvas(imageSize.height, imageSize.width*3, CV_8UC3), small_canvas;
destroyWindow("view");
canvas = Scalar::all(0);
for( i = 0; i < (int)(imageList.size()/3); i++ )
{
canvas = Scalar::all(0);
for( k = 0; k < 3; k++ )
{
int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
int k2 = k == 0 ? 1 : k == 1 ? 0 : 2;
view = imread(imageList[i*3+k], IMREAD_COLOR);
if(view.empty())
continue;
Mat rview = canvas.colRange(k2*imageSize.width, (k2+1)*imageSize.width);
remap(view, rview, map1[k1], map2[k1], INTER_LINEAR);
}
printf("%s %s %s\n", imageList[i*3].c_str(), imageList[i*3+1].c_str(), imageList[i*3+2].c_str());
resize( canvas, small_canvas, Size(1500, 1500/3), 0, 0, INTER_LINEAR_EXACT );
for( k = 0; k < small_canvas.rows; k += 16 )
line(small_canvas, Point(0, k), Point(small_canvas.cols, k), Scalar(0,255,0), 1);
imshow("rectified", small_canvas);
char c = (char)waitKey(0);
if( c == 27 || c == 'q' || c == 'Q' )
break;
}
return 0;
}
<|fim_middle|>fs << "R2" << R[1];
|
ize = view.size();
cvtColor(view, viewGray, COLOR_BGR2GRAY);
bool found = findChessboardCorners( view, boardSize, ptvec, CALIB_CB_ADAPTIVE_THRESH );
drawChessboardCorners( view, boardSize, Mat(ptvec), found );
if( found )
{
imgpt[k1][i].resize(ptvec.size());
std::copy(ptvec.begin(), ptvec.end(), imgpt[k1][i].begin());
}
//imshow("view", view);
//int c = waitKey(0) & 255;
//if( c == 27 || c == 'q' || c == 'Q' )
// return -1;
}
}
}
printf("Running calibration ...\n");
run3Calibration(imgpt[0], imgpt[1], imgpt[2], imageSize,
boardSize, squareSize, aspectRatio, flags|CALIB_FIX_K4|CALIB_FIX_K5,
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
cameraMatrix[2], distCoeffs[2],
R12, T12, R13, T13);
fs.open(outputFilename, FileStorage::WRITE);
fs << "cameraMatrix1" << cameraMatrix[0];
fs << "cameraMatrix2" << cameraMatrix[1];
fs << "cameraMatrix3" << cameraMatrix[2];
fs << "distCoeffs1" << distCoeffs[0];
fs << "distCoeffs2" << distCoeffs[1];
fs << "distCoeffs3" << distCoeffs[2];
fs << "R12" << R12;
fs << "T12" << T12;
fs << "R13" << R13;
fs << "T13" << T13;
fs << "imageWidth" << imageSize.width;
fs << "imageHeight" << imageSize.height;
Mat Q;
// step 3: find rectification transforms
double ratio = rectify3Collinear(cameraMatrix[0], distCoeffs[0], cameraMatrix[1],
distCoeffs[1], cameraMatrix[2], distCoeffs[2],
imgpt[0], imgpt[2],
imageSize, R12, T12, R13, T13,
R[0], R[1], R[2], P[0], P[1], P[2], Q, -1.,
imageSize, 0, 0, CALIB_ZERO_DISPARITY);
Mat map1[3], map2[3];
fs << "R1" << R[0];
|
fs << "R2" << R[1];
|
fs << "R3" << R[2];
fs << "P1" << P[0];
fs << "P2" << P[1];
fs << "P3" << P[2];
fs << "disparityRatio" << ratio;
fs.release();
printf("Disparity ratio = %g\n", ratio);
for( k = 0; k < 3; k++ )
initUndistortRectifyMap(cameraMatrix[k], distCoeffs[k], R[k], P[k], imageSize, CV_16SC2, map1[k], map2[k]);
Mat canvas(imageSize.height, imageSize.width*3, CV_8UC3), small_canvas;
destroyWindow("view");
canvas = Scalar::all(0);
for( i = 0; i < (int)(imageList.size()/3); i++ )
{
canvas = Scalar::all(0);
for( k = 0; k < 3; k++ )
{
int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
int k2 = k == 0 ? 1 : k == 1 ? 0 : 2;
view = imread(imageList[i*3+k], IMREAD_COLOR);
if(view.empty())
continue;
Mat rview = canvas.colRange(k2*imageSize.width, (k2+1)*imageSize.width);
remap(view, rview, map1[k1], map2[k1], INTER_LINEAR);
}
printf("%s %s %s\n", imageList[i*3].c_str(), imageList[i*3+1].c_str(), imageList[i*3+2].c_str());
resize( canvas, small_canvas, Size(1500, 1500/3), 0, 0, INTER_LINEAR_EXACT );
for( k = 0; k < small_canvas.rows; k += 16 )
line(small_canvas, Point(0, k), Point(small_canvas.cols, k), Scalar(0,255,0), 1);
imshow("rectified", small_canvas);
char c = (char)waitKey(0);
if( c == 27 || c == 'q' || c == 'Q' )
break;
}
return 0;
}
|
ast_based
|
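The rectification loop above follows OpenCV's usual two-step pattern: the per-camera undistort/rectify maps are computed once from (cameraMatrix, distCoeffs, R, P), and every subsequent frame is only remapped. A condensed sketch of that pattern using the same OpenCV calls as the sample:
// once per camera, after calibration and rectification:
initUndistortRectifyMap(cameraMatrix[k], distCoeffs[k], R[k], P[k], imageSize, CV_16SC2, map1[k], map2[k]);
// then once per incoming image:
remap(view, rectified, map1[k], map2[k], INTER_LINEAR);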
<|fim_prefix|> == 0) {
return; // Do nothing.
}
// Frame count may be changed by animation_finished or frame_changed signals.
int fc = frames->get_frame_count(animation);
int last_frame = fc - 1;
if (!std::signbit(speed)) {
// Forwards.
if (frame_progress >= 1.0) {
if (frame >= last_frame) {
if (frames->get_animation_loop(animation)) {
frame = 0;
emit_signal("animation_looped");
} else {
frame = last_frame;
pause();
emit_signal(SceneStringName(animation_finished));
return;
}
} else {
frame++;
}
_calc_frame_speed_scale();
frame_progress = 0.0;
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
double to_process = MIN((1.0 - frame_progress) / abs_speed, remaining);
frame_progress += to_process * abs_speed;
remaining -= to_process;
} else {
// Backwards.
if (frame_progress <= 0) {
if (frame <= 0) {
if (frames->get_animation_loop(animation)) {
frame = last_frame;
emit_signal("animation_looped");
} else {
frame = 0;
pause();
emit_signal(SceneStringName(animation_finished));
return;
}
} else {
frame--;
}
_calc_frame_speed_scale();
frame_progress = 1.0;
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
double to_process = MIN(frame_progress / abs_speed, remaining);
frame_progress -= to_process * abs_speed;
remaining -= to_process;
}
i++;
if (i > fc) {
return; // Prevents freezing if to_process is repeatedly much smaller than remaining.
}
}
} break;
case NOTIFICATION_DRAW: {
if (frames.is_null() || !frames->has_animation(animation)) {
return;
}
Ref<Texture2D> texture = frames->get_frame_texture(animation, frame);
if (texture.is_null()) {
return;
}
RID ci = get_canvas_item();
Size2 s = <|fim_suffix|>;
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (get_viewport() && get_viewport()->is_snap_2d_transforms_to_pixel_enabled()) {
ofs = (ofs + Point2(0.5, 0.5)).floor();
}
Rect2 dst_rect(ofs, s);
if (hflip) {
dst_rect.size.x = -dst_rect.size.x;
}
if (vflip) {
dst_rect.size.y = -dst_rect.size.y;
}
texture->draw_rect_region(ci, dst_rect, Rect2(Vector2(), texture->get_size()), Color(1, 1, 1), false);
} break;
}
}
void AnimatedSprite2D::set_sprite_frames(const Ref<SpriteFrames> &p_frames) {
if (frames == p_frames) {
return;
}
if (frames.is_valid()) {
frames->disconnect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
}
stop();
frames = p_frames;
if (frames.is_valid()) {
frames->connect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
List<StringName> al;
frames->get_animation_list(&al);
if (al.is_empty()) {
set_animation(StringName());
autoplay = String();
} else {
if (!frames->has_animation(animation)) {
set_animation(al.front()->get());
}
if (!frames->has_animation(autoplay)) {
autoplay = String();
}
}
}
notify_property_list_changed();
queue_redraw();
update_configuration_warnings();
emit_signal("sprite_frames_changed");
}
Ref<SpriteFrames> AnimatedSprite2D::get_sprite_frames() const {
return frames;
}
void AnimatedSprite2D::set_frame(int p_frame) {
set_frame_and_progress(p_frame, std::signbit(get_playing_speed()) ? 1.0 : 0.0);
}
int AnimatedSprite2D::get_frame() const {
return frame;
}
void AnimatedSprite2D::set_frame_progress(real_t p_progress) {
frame_progress = p_progress;
}
real_t AnimatedSprite2D::get_frame_progress() const {
return frame_progress;
}
void AnimatedSprite2D::set_frame_and_progress(int p_frame, real_t p_progress) {
if (frames.is_null()) {
return;
}
bool has_animation = frames->has_animation(animation);
int end_frame = has_animation ? MAX(0, frames->get_fr<|fim_middle|>texture->get_size()
|
== 0) {
return; // Do nothing.
}
// Frame count may be changed by animation_finished or frame_changed signals.
int fc = frames->get_frame_count(animation);
int last_frame = fc - 1;
if (!std::signbit(speed)) {
// Forwards.
if (frame_progress >= 1.0) {
if (frame >= last_frame) {
if (frames->get_animation_loop(animation)) {
frame = 0;
emit_signal("animation_looped");
} else {
frame = last_frame;
pause();
emit_signal(SceneStringName(animation_finished));
return;
}
} else {
frame++;
}
_calc_frame_speed_scale();
frame_progress = 0.0;
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
double to_process = MIN((1.0 - frame_progress) / abs_speed, remaining);
frame_progress += to_process * abs_speed;
remaining -= to_process;
} else {
// Backwards.
if (frame_progress <= 0) {
if (frame <= 0) {
if (frames->get_animation_loop(animation)) {
frame = last_frame;
emit_signal("animation_looped");
} else {
frame = 0;
pause();
emit_signal(SceneStringName(animation_finished));
return;
}
} else {
frame--;
}
_calc_frame_speed_scale();
frame_progress = 1.0;
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
double to_process = MIN(frame_progress / abs_speed, remaining);
frame_progress -= to_process * abs_speed;
remaining -= to_process;
}
i++;
if (i > fc) {
return; // Prevents freezing if to_process is repeatedly much smaller than remaining.
}
}
} break;
case NOTIFICATION_DRAW: {
if (frames.is_null() || !frames->has_animation(animation)) {
return;
}
Ref<Texture2D> texture = frames->get_frame_texture(animation, frame);
if (texture.is_null()) {
return;
}
RID ci = get_canvas_item();
Size2 s =
|
texture->get_size()
|
;
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (get_viewport() && get_viewport()->is_snap_2d_transforms_to_pixel_enabled()) {
ofs = (ofs + Point2(0.5, 0.5)).floor();
}
Rect2 dst_rect(ofs, s);
if (hflip) {
dst_rect.size.x = -dst_rect.size.x;
}
if (vflip) {
dst_rect.size.y = -dst_rect.size.y;
}
texture->draw_rect_region(ci, dst_rect, Rect2(Vector2(), texture->get_size()), Color(1, 1, 1), false);
} break;
}
}
void AnimatedSprite2D::set_sprite_frames(const Ref<SpriteFrames> &p_frames) {
if (frames == p_frames) {
return;
}
if (frames.is_valid()) {
frames->disconnect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
}
stop();
frames = p_frames;
if (frames.is_valid()) {
frames->connect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
List<StringName> al;
frames->get_animation_list(&al);
if (al.is_empty()) {
set_animation(StringName());
autoplay = String();
} else {
if (!frames->has_animation(animation)) {
set_animation(al.front()->get());
}
if (!frames->has_animation(autoplay)) {
autoplay = String();
}
}
}
notify_property_list_changed();
queue_redraw();
update_configuration_warnings();
emit_signal("sprite_frames_changed");
}
Ref<SpriteFrames> AnimatedSprite2D::get_sprite_frames() const {
return frames;
}
void AnimatedSprite2D::set_frame(int p_frame) {
set_frame_and_progress(p_frame, std::signbit(get_playing_speed()) ? 1.0 : 0.0);
}
int AnimatedSprite2D::get_frame() const {
return frame;
}
void AnimatedSprite2D::set_frame_progress(real_t p_progress) {
frame_progress = p_progress;
}
real_t AnimatedSprite2D::get_frame_progress() const {
return frame_progress;
}
void AnimatedSprite2D::set_frame_and_progress(int p_frame, real_t p_progress) {
if (frames.is_null()) {
return;
}
bool has_animation = frames->has_animation(animation);
int end_frame = has_animation ? MAX(0, frames->get_fr
|
ast_based
|
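A short worked example of the frame-advance loop above, using illustrative numbers rather than values from the engine: with frame_progress = 0.25, abs_speed = 5.0 and remaining = 0.40 s, the time left in the current frame is (1.0 - 0.25) / 5.0 = 0.15 s, so to_process = min(0.15, 0.40) = 0.15; frame_progress becomes 0.25 + 0.15 * 5.0 = 1.0 and remaining drops to 0.25 s, so the next iteration advances the frame, resets frame_progress to 0.0, and keeps consuming the remaining time until it runs out or the iteration guard (i > fc) trips.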
<|fim_prefix|>#define CARBON_TOOLCHAIN_BASE_CANONICAL_VALUE_STORE_H_
#include "common/hashtable_key_context.h"
#include "common/set.h"
#include "toolchain/base/mem_usage.h"
#include "toolchain/base/value_store.h"
#include "toolchain/base/value_store_types.h"
#include "toolchain/base/yaml.h"
namespace Carbon {
// A wrapper for accumulating immutable values with deduplication, providing IDs
// to later retrieve the value.
//
// `ValueT` represents the type being stored.
//
// `KeyT` can optionally be different from `ValueT`, and if so is used for the
// argument to `Lookup`. It must be valid to use both `KeyT` and `ValueT` as
// lookup types in the underlying `Set`.
template <typename IdT, typename KeyT, typename ValueT = KeyT>
class CanonicalValueStore {
public:
using KeyType = std::remove_cvref_t<KeyT>;
using ValueType = ValueStoreTypes<ValueT>::ValueType;
using RefType = ValueStoreTypes<ValueT>::RefType;
using ConstRefType = ValueStoreTypes<ValueT>::ConstRefType;
// Stores a canonical copy of the value and returns an ID to reference it.
auto Add(ValueType value) -> IdT;
// Returns the value for an ID.
auto Get(IdT id) const -> ConstRefType { return values_.Get(id); }
// Looks up the canonical ID for a value, or returns `None` if not in the
// store.
auto Lookup(KeyType key) const -> IdT;
// Reserves space.
auto Reserve(size_t size) -> void;
// These are to support printable structures, and are not guaranteed.
auto OutputYaml() const -> Yaml::OutputMapping {
return values_.OutputYaml();
}
auto values() const [[clang::lifetimebound]]
-> ValueStore<IdT, ValueType>::Range {
return values_.values();
}
auto size() const -> size_t { return values_.size(); }
// Collects memory usage of the values and deduplication set.
auto CollectMemUsage(MemUsage& mem_usage, llvm::StringRef label) const
-> void {
mem_usage.Collect(MemUsage::ConcatLabel(label, "values_"), values_);<|fim_suffix|> }
private:
class KeyContext;
ValueStore<IdT, ValueType> values_;
Set<IdT, /*SmallSize=*/0, KeyContext> set_;
};
template <typename IdT, typename KeyT, typename ValueT>
class CanonicalValueStore<IdT, KeyT, ValueT>::KeyContext
: public TranslatingKeyContext<KeyContext> {
public:
explicit KeyContext(const ValueStore<IdT, ValueType>* values)
: values_(values) {}
// Note that it is safe to return a `const` reference here as the underlying
// object's lifetime is provided by the `ValueStore`.
auto TranslateKey(IdT id) const -> ConstRefType { return values_->Get(id); }
private:
const ValueStore<IdT, ValueType>* values_;
};
template <typename IdT, typename KeyT, typename ValueT>
auto CanonicalValueStore<IdT, KeyT, ValueT>::Add(ValueType value) -> IdT {
auto make_key = [&] { return IdT(values_.Add(std::move(value))); };
return set_.Insert(value, make_key, KeyContext(&values_)).key();
}
template <typename IdT, typename KeyT, typename ValueT>
auto CanonicalValueStore<IdT, KeyT, ValueT>::Lookup(KeyType key) const -> IdT {
if (auto result = set_.Lookup(key, KeyContext(&values_))) {
return result.key();
}
return IdT::None;
}
template <typename IdT, typename KeyT, typename ValueT>
auto CanonicalValueStore<IdT, KeyT, ValueT>::Reserve(size_t size) -> void {
// Compute the resulting new insert count using the size of values -- the
// set doesn't have a fast-to-compute current size.
if (size > values_.size()) {
set_.GrowForInsertCount(size - values_.size(), KeyContext(&values_));
}
values_.Reserve(size);
}
} // namespace Carbon
#endif // CARBON_TOOLCHAIN_BASE_CANONICAL_VALUE_STORE_H_
<|fim_middle|> auto bytes = set_.ComputeMetrics(KeyContext(&values_)).storage_bytes;
mem_usage.Add(MemUsage::ConcatLabel(label, "set_"), bytes, bytes);
|
#define CARBON_TOOLCHAIN_BASE_CANONICAL_VALUE_STORE_H_
#include "common/hashtable_key_context.h"
#include "common/set.h"
#include "toolchain/base/mem_usage.h"
#include "toolchain/base/value_store.h"
#include "toolchain/base/value_store_types.h"
#include "toolchain/base/yaml.h"
namespace Carbon {
// A wrapper for accumulating immutable values with deduplication, providing IDs
// to later retrieve the value.
//
// `ValueT` represents the type being stored.
//
// `KeyT` can optionally be different from `ValueT`, and if so is used for the
// argument to `Lookup`. It must be valid to use both `KeyT` and `ValueT` as
// lookup types in the underlying `Set`.
template <typename IdT, typename KeyT, typename ValueT = KeyT>
class CanonicalValueStore {
public:
using KeyType = std::remove_cvref_t<KeyT>;
using ValueType = ValueStoreTypes<ValueT>::ValueType;
using RefType = ValueStoreTypes<ValueT>::RefType;
using ConstRefType = ValueStoreTypes<ValueT>::ConstRefType;
// Stores a canonical copy of the value and returns an ID to reference it.
auto Add(ValueType value) -> IdT;
// Returns the value for an ID.
auto Get(IdT id) const -> ConstRefType { return values_.Get(id); }
// Looks up the canonical ID for a value, or returns `None` if not in the
// store.
auto Lookup(KeyType key) const -> IdT;
// Reserves space.
auto Reserve(size_t size) -> void;
// These are to support printable structures, and are not guaranteed.
auto OutputYaml() const -> Yaml::OutputMapping {
return values_.OutputYaml();
}
auto values() const [[clang::lifetimebound]]
-> ValueStore<IdT, ValueType>::Range {
return values_.values();
}
auto size() const -> size_t { return values_.size(); }
// Collects memory usage of the values and deduplication set.
auto CollectMemUsage(MemUsage& mem_usage, llvm::StringRef label) const
-> void {
mem_usage.Collect(MemUsage::ConcatLabel(label, "values_"), values_);
|
auto bytes = set_.ComputeMetrics(KeyContext(&values_)).storage_bytes;
mem_usage.Add(MemUsage::ConcatLabel(label, "set_"), bytes, bytes);
|
}
private:
class KeyContext;
ValueStore<IdT, ValueType> values_;
Set<IdT, /*SmallSize=*/0, KeyContext> set_;
};
template <typename IdT, typename KeyT, typename ValueT>
class CanonicalValueStore<IdT, KeyT, ValueT>::KeyContext
: public TranslatingKeyContext<KeyContext> {
public:
explicit KeyContext(const ValueStore<IdT, ValueType>* values)
: values_(values) {}
// Note that it is safe to return a `const` reference here as the underlying
// object's lifetime is provided by the `ValueStore`.
auto TranslateKey(IdT id) const -> ConstRefType { return values_->Get(id); }
private:
const ValueStore<IdT, ValueType>* values_;
};
template <typename IdT, typename KeyT, typename ValueT>
auto CanonicalValueStore<IdT, KeyT, ValueT>::Add(ValueType value) -> IdT {
auto make_key = [&] { return IdT(values_.Add(std::move(value))); };
return set_.Insert(value, make_key, KeyContext(&values_)).key();
}
template <typename IdT, typename KeyT, typename ValueT>
auto CanonicalValueStore<IdT, KeyT, ValueT>::Lookup(KeyType key) const -> IdT {
if (auto result = set_.Lookup(key, KeyContext(&values_))) {
return result.key();
}
return IdT::None;
}
template <typename IdT, typename KeyT, typename ValueT>
auto CanonicalValueStore<IdT, KeyT, ValueT>::Reserve(size_t size) -> void {
// Compute the resulting new insert count using the size of values -- the
// set doesn't have a fast-to-compute current size.
if (size > values_.size()) {
set_.GrowForInsertCount(size - values_.size(), KeyContext(&values_));
}
values_.Reserve(size);
}
} // namespace Carbon
#endif // CARBON_TOOLCHAIN_BASE_CANONICAL_VALUE_STORE_H_
|
random
|
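A minimal usage sketch for the CanonicalValueStore template above, with a hypothetical StringId id type that is not part of the original header; Add() stores a value once and returns its id, and Lookup() returns the canonical id or IdT::None:
// CanonicalValueStore<StringId, llvm::StringRef, std::string> strings;
// StringId a = strings.Add("hello");
// StringId b = strings.Add("hello");     // b == a, the value is stored only once
// StringId c = strings.Lookup("absent"); // StringId::None, nothing was added for this key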
<|fim_prefix|>rned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetTSVText(int page_number) {
if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(nullptr) < 0)) {
return nullptr;
}
#if !defined(NDEBUG)
int lcnt = 1, bcnt = 1, pcnt = 1, wcnt = 1;
#endif
int page_id = page_number + 1; // we use 1-based page numbers.
int page_num = page_id;
int block_num = 0;
int par_num = 0;
int line_num = 0;
int word_num = 0;
std::string tsv_str;
tsv_str += "1\t" + std::to_string(page_num); // level 1 - page
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(rect_left_);
tsv_str += "\t" + std::to_string(rect_top_);
tsv_str += "\t" + std::to_string(rect_width_);
tsv_str += "\t" + std::to_string(rect_height_);
tsv_str += "\t-1\t\n";
const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
}
// Add rows for any new block/paragraph/textline.
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
block_num++;
par_num = 0;
line_num = 0;
word_num = 0;
tsv_str += "2\t" + std::to_string(page_num); // level 2 - block
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for block
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
par_num++;
line_num = 0;
word_num = 0;
tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
<|fim_suffix|> // end of row for para
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
line_num++;
word_num = 0;
tsv_str += "4\t" + std::to_string(page_num); // level 4 - line
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_TEXTLINE, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for line
}
// Now, process the word...
int left, top, right, bottom;
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
word_num++;
tsv_str += "5\t" + std::to_string(page_num); // level 5 - word
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(left);
tsv_str += "\t" + std::to_string(top);
tsv_str += "\t" + std::to_string(right - left);
tsv_str += "\t" + std::to_string(bottom - top);
tsv_str += "\t" + std::to_string(res_it->Confidence(RIL_WORD));
tsv_str += "\t";
#if !defined(NDEBUG)
// Increment counts if at end of block/paragraph/textline.
if (res_it->IsAtFinalElement(RIL_TEXTLINE, RIL_WORD)) {
lcnt++;
}
if (res_it->IsAtFinalElement(RIL_PARA, RIL_WORD)) {
pcnt++;
}
if (res_it->IsAtFinalElement(RIL_BLOCK, RIL_WORD)) {
bcnt++;
}
#endif
do {
tsv_str += std::unique_ptr<const char[]>(res_it->GetUTF8Text(RIL_SYMBOL)).get();
res_it->Next(RIL_SYMBOL);
} while (!res_it->Empty(RIL_BLOCK) && !res_it->IsAtBeginningOf(RIL_WORD));
tsv_str += "\n"; // end of row
#if !defined(NDEBUG)
wcnt++;
#endif
}
return copy_string(tsv_str);
}
/** The 5 numbers output for each box (the usual 4 and a page number.) */
const int kNumbersPerBlob = 5;
/**
* The number of bytes taken by each number. Since we use<|fim_middle|>tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str);
tsv_str += "\t-1\t\n";
|
rned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetTSVText(int page_number) {
if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(nullptr) < 0)) {
return nullptr;
}
#if !defined(NDEBUG)
int lcnt = 1, bcnt = 1, pcnt = 1, wcnt = 1;
#endif
int page_id = page_number + 1; // we use 1-based page numbers.
int page_num = page_id;
int block_num = 0;
int par_num = 0;
int line_num = 0;
int word_num = 0;
std::string tsv_str;
tsv_str += "1\t" + std::to_string(page_num); // level 1 - page
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(rect_left_);
tsv_str += "\t" + std::to_string(rect_top_);
tsv_str += "\t" + std::to_string(rect_width_);
tsv_str += "\t" + std::to_string(rect_height_);
tsv_str += "\t-1\t\n";
const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
}
// Add rows for any new block/paragraph/textline.
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
block_num++;
par_num = 0;
line_num = 0;
word_num = 0;
tsv_str += "2\t" + std::to_string(page_num); // level 2 - block
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for block
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
par_num++;
line_num = 0;
word_num = 0;
tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
|
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str);
tsv_str += "\t-1\t\n";
|
// end of row for para
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
line_num++;
word_num = 0;
tsv_str += "4\t" + std::to_string(page_num); // level 4 - line
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_TEXTLINE, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for line
}
// Now, process the word...
int left, top, right, bottom;
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
word_num++;
tsv_str += "5\t" + std::to_string(page_num); // level 5 - word
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(left);
tsv_str += "\t" + std::to_string(top);
tsv_str += "\t" + std::to_string(right - left);
tsv_str += "\t" + std::to_string(bottom - top);
tsv_str += "\t" + std::to_string(res_it->Confidence(RIL_WORD));
tsv_str += "\t";
#if !defined(NDEBUG)
// Increment counts if at end of block/paragraph/textline.
if (res_it->IsAtFinalElement(RIL_TEXTLINE, RIL_WORD)) {
lcnt++;
}
if (res_it->IsAtFinalElement(RIL_PARA, RIL_WORD)) {
pcnt++;
}
if (res_it->IsAtFinalElement(RIL_BLOCK, RIL_WORD)) {
bcnt++;
}
#endif
do {
tsv_str += std::unique_ptr<const char[]>(res_it->GetUTF8Text(RIL_SYMBOL)).get();
res_it->Next(RIL_SYMBOL);
} while (!res_it->Empty(RIL_BLOCK) && !res_it->IsAtBeginningOf(RIL_WORD));
tsv_str += "\n"; // end of row
#if !defined(NDEBUG)
wcnt++;
#endif
}
return copy_string(tsv_str);
}
/** The 5 numbers output for each box (the usual 4 and a page number.) */
const int kNumbersPerBlob = 5;
/**
* The number of bytes taken by each number. Since we use
|
ast_based
|
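The rows assembled by GetTSVText above share one layout: twelve tab-separated columns per line, with -1 in the confidence column and an empty text column for every non-word row. Summarized from the code (the column names are descriptive labels, not identifiers from the source):
// level  page_num  block_num  par_num  line_num  word_num  left  top  width  height  conf  text
// level 1 = page, 2 = block, 3 = paragraph, 4 = textline, 5 = word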
<|fim_prefix|> role_map[DisplayServer::AccessibilityRole::ROLE_SPLITTER] = ACCESSKIT_ROLE_SPLITTER;
role_map[DisplayServer::AccessibilityRole::ROLE_SLIDER] = ACCESSKIT_ROLE_SLIDER;
role_map[DisplayServer::AccessibilityRole::ROLE_SPIN_BUTTON] = ACCESSKIT_ROLE_SPIN_BUTTON;
role_map[DisplayServer::AccessibilityRole::ROLE_PROGRESS_INDICATOR] = ACCESSKIT_ROLE_PROGRESS_INDICATOR;
role_map[DisplayServer::AccessibilityRole::ROLE_TEXT_FIELD] = ACCESSKIT_ROLE_TEXT_INPUT;
role_map[DisplayServer::AccessibilityRole::ROLE_MULTILINE_TEXT_FIELD] = ACCESSKIT_ROLE_MULTILINE_TEXT_INPUT;
role_map[DisplayServer::AccessibilityRole::ROLE_COLOR_PICKER] = ACCESSKIT_ROLE_COLOR_WELL;
role_map[DisplayServer::AccessibilityRole::ROLE_TABLE] = ACCESSKIT_ROLE_TABLE;
role_map[DisplayServer::AccessibilityRole::ROLE_CELL] = ACCESSKIT_ROLE_CELL;
role_map[DisplayServer::AccessibilityRole::ROLE_ROW] = ACCESSKIT_ROLE_ROW;
role_map[DisplayServer::AccessibilityRole::ROLE_ROW_GROUP] = ACCESSKIT_ROLE_ROW_GROUP;
role_map[DisplayServer::AccessibilityRole::ROLE_ROW_HEADER] = ACCESSKIT_ROLE_ROW_HEADER;
role_map[DisplayServer::AccessibilityRole::ROLE_COLUMN_HEADER] = ACCESSKIT_ROLE_COLUMN_HEADER;
role_map[DisplayServer::AccessibilityRole::ROLE_TREE] = ACCESSKIT_ROLE_TREE;
role_map[DisplayServer::AccessibilityRole::ROLE_TREE_ITEM] = ACCESSKIT_ROLE_TREE_ITEM;
role_map[DisplayServer::AccessibilityRole::ROLE_LIST] = ACCESSKIT_ROLE_LIST;
role_map[DisplayServer::AccessibilityRole::ROLE_LIST_ITEM] = ACCESSKIT_ROLE_LIST_ITEM;
role_map[DisplayServer::AccessibilityRole::ROLE_LIST_BOX] = ACCESSKIT_ROLE_LIST_BOX;
role_map[DisplayServer::AccessibilityRole::ROLE_LIST_BOX_OPTION] = ACCESSKIT_ROLE_LIST_BOX_OPTION;
role_map[DisplayServer::AccessibilityRole::ROLE_TAB_BAR] = ACCESSKIT_ROLE_TAB_LIST;
role_map[DisplayServer::AccessibilityRole::ROLE_TAB] = ACCESSKIT_ROLE_TAB;
role_map[DisplayServer::AccessibilityRole::ROLE_TAB_PANEL] = ACCESSKIT_ROLE_TAB_PANEL;<|fim_suffix|> role_map[DisplayServer::AccessibilityRole::ROLE_IMAGE] = ACCESSKIT_ROLE_IMAGE;
role_map[DisplayServer::AccessibilityRole::ROLE_WINDOW] = ACCESSKIT_ROLE_WINDOW;
role_map[DisplayServer::AccessibilityRole::ROLE_TITLE_BAR] = ACCESSKIT_ROLE_TITLE_BAR;
role_map[DisplayServer::AccessibilityRole::ROLE_DIALOG] = ACCESSKIT_ROLE_DIALOG;
role_map[DisplayServer::AccessibilityRole::ROLE_TOOLTIP] = ACCESSKIT_ROLE_TOOLTIP;
action_map[DisplayServer::AccessibilityAction::ACTION_CLICK] = ACCESSKIT_ACTION_CLICK;
action_map[DisplayServer::AccessibilityAction::ACTION_FOCUS] = ACCESSKIT_ACTION_FOCUS;
action_map[DisplayServer::AccessibilityAction::ACTION_BLUR] = ACCESSKIT_ACTION_BLUR;
action_map[DisplayServer::AccessibilityAction::ACTION_COLLAPSE] = ACCESSKIT_ACTION_COLLAPSE;
action_map[DisplayServer::AccessibilityAction::ACTION_EXPAND] = ACCESSKIT_ACTION_EXPAND;
action_map[DisplayServer::AccessibilityAction::ACTION_DECREMENT] = ACCESSKIT_ACTION_DECREMENT;
action_map[DisplayServer::AccessibilityAction::ACTION_INCREMENT] = ACCESSKIT_ACTION_INCREMENT;
action_map[DisplayServer::AccessibilityAction::ACTION_HIDE_TOOLTIP] = ACCESSKIT_ACTION_HIDE_TOOLTIP;
action_map[DisplayServer::AccessibilityAction::ACTION_SHOW_TOOLTIP] = ACCESSKIT_ACTION_SHOW_TOOLTIP;
//action_map[DisplayServer::AccessibilityAction::ACTION_INVALIDATE_TREE] = ACCESSKIT_ACTION_INVALIDATE_TREE;
//action_map[DisplayServer::AccessibilityAction::ACTION_LOAD_INLINE_TEXT_BOXES] = ACCESSKIT_ACTION_LOAD_INLINE_TEXT_BOXES;
action_map[DisplayServer::AccessibilityAction::ACTION_SET_TEXT_SELECTION] = ACCESSKIT_ACTION_SET_TEXT_SELECTION;
action_map[DisplayServer::AccessibilityAction::ACTION_REPLACE_SELECTED_TEXT] = ACCESSKIT_ACTION_REPLACE_SELECTED_TEXT;
action_map[DisplayServer::AccessibilityAction::ACTION_SCROLL_BACKWARD] = ACCESSKIT_ACTION_SCROLL_UP;
action_map[DisplayServer::AccessibilityAction::ACTION_SCROLL_DOWN] = ACCESSKIT_ACTION_SCROLL_DOWN;<|fim_middle|> role_map[DisplayServer::AccessibilityRole::ROLE_MENU_BAR] = ACCESSKIT_ROLE_MENU_BAR;
role_map[DisplayServer::AccessibilityRole::ROLE_MENU] = ACCESSKIT_ROLE_MENU;
role_map[DisplayServer::AccessibilityRole::ROLE_MENU_ITEM] = ACCESSKIT_ROLE_MENU_ITEM;
role_map[DisplayServer::AccessibilityRole::ROLE_MENU_ITEM_CHECK_BOX] = ACCESSKIT_ROLE_MENU_ITEM_CHECK_BOX;
role_map[DisplayServer::AccessibilityRole::ROLE_MENU_ITEM_RADIO] = ACCESSKIT_ROLE_MENU_ITEM_RADIO;
|
role_map[DisplayServer::AccessibilityRole::ROLE_SPLITTER] = ACCESSKIT_ROLE_SPLITTER;
role_map[DisplayServer::AccessibilityRole::ROLE_SLIDER] = ACCESSKIT_ROLE_SLIDER;
role_map[DisplayServer::AccessibilityRole::ROLE_SPIN_BUTTON] = ACCESSKIT_ROLE_SPIN_BUTTON;
role_map[DisplayServer::AccessibilityRole::ROLE_PROGRESS_INDICATOR] = ACCESSKIT_ROLE_PROGRESS_INDICATOR;
role_map[DisplayServer::AccessibilityRole::ROLE_TEXT_FIELD] = ACCESSKIT_ROLE_TEXT_INPUT;
role_map[DisplayServer::AccessibilityRole::ROLE_MULTILINE_TEXT_FIELD] = ACCESSKIT_ROLE_MULTILINE_TEXT_INPUT;
role_map[DisplayServer::AccessibilityRole::ROLE_COLOR_PICKER] = ACCESSKIT_ROLE_COLOR_WELL;
role_map[DisplayServer::AccessibilityRole::ROLE_TABLE] = ACCESSKIT_ROLE_TABLE;
role_map[DisplayServer::AccessibilityRole::ROLE_CELL] = ACCESSKIT_ROLE_CELL;
role_map[DisplayServer::AccessibilityRole::ROLE_ROW] = ACCESSKIT_ROLE_ROW;
role_map[DisplayServer::AccessibilityRole::ROLE_ROW_GROUP] = ACCESSKIT_ROLE_ROW_GROUP;
role_map[DisplayServer::AccessibilityRole::ROLE_ROW_HEADER] = ACCESSKIT_ROLE_ROW_HEADER;
role_map[DisplayServer::AccessibilityRole::ROLE_COLUMN_HEADER] = ACCESSKIT_ROLE_COLUMN_HEADER;
role_map[DisplayServer::AccessibilityRole::ROLE_TREE] = ACCESSKIT_ROLE_TREE;
role_map[DisplayServer::AccessibilityRole::ROLE_TREE_ITEM] = ACCESSKIT_ROLE_TREE_ITEM;
role_map[DisplayServer::AccessibilityRole::ROLE_LIST] = ACCESSKIT_ROLE_LIST;
role_map[DisplayServer::AccessibilityRole::ROLE_LIST_ITEM] = ACCESSKIT_ROLE_LIST_ITEM;
role_map[DisplayServer::AccessibilityRole::ROLE_LIST_BOX] = ACCESSKIT_ROLE_LIST_BOX;
role_map[DisplayServer::AccessibilityRole::ROLE_LIST_BOX_OPTION] = ACCESSKIT_ROLE_LIST_BOX_OPTION;
role_map[DisplayServer::AccessibilityRole::ROLE_TAB_BAR] = ACCESSKIT_ROLE_TAB_LIST;
role_map[DisplayServer::AccessibilityRole::ROLE_TAB] = ACCESSKIT_ROLE_TAB;
role_map[DisplayServer::AccessibilityRole::ROLE_TAB_PANEL] = ACCESSKIT_ROLE_TAB_PANEL;
|
role_map[DisplayServer::AccessibilityRole::ROLE_MENU_BAR] = ACCESSKIT_ROLE_MENU_BAR;
role_map[DisplayServer::AccessibilityRole::ROLE_MENU] = ACCESSKIT_ROLE_MENU;
role_map[DisplayServer::AccessibilityRole::ROLE_MENU_ITEM] = ACCESSKIT_ROLE_MENU_ITEM;
role_map[DisplayServer::AccessibilityRole::ROLE_MENU_ITEM_CHECK_BOX] = ACCESSKIT_ROLE_MENU_ITEM_CHECK_BOX;
role_map[DisplayServer::AccessibilityRole::ROLE_MENU_ITEM_RADIO] = ACCESSKIT_ROLE_MENU_ITEM_RADIO;
|
role_map[DisplayServer::AccessibilityRole::ROLE_IMAGE] = ACCESSKIT_ROLE_IMAGE;
role_map[DisplayServer::AccessibilityRole::ROLE_WINDOW] = ACCESSKIT_ROLE_WINDOW;
role_map[DisplayServer::AccessibilityRole::ROLE_TITLE_BAR] = ACCESSKIT_ROLE_TITLE_BAR;
role_map[DisplayServer::AccessibilityRole::ROLE_DIALOG] = ACCESSKIT_ROLE_DIALOG;
role_map[DisplayServer::AccessibilityRole::ROLE_TOOLTIP] = ACCESSKIT_ROLE_TOOLTIP;
action_map[DisplayServer::AccessibilityAction::ACTION_CLICK] = ACCESSKIT_ACTION_CLICK;
action_map[DisplayServer::AccessibilityAction::ACTION_FOCUS] = ACCESSKIT_ACTION_FOCUS;
action_map[DisplayServer::AccessibilityAction::ACTION_BLUR] = ACCESSKIT_ACTION_BLUR;
action_map[DisplayServer::AccessibilityAction::ACTION_COLLAPSE] = ACCESSKIT_ACTION_COLLAPSE;
action_map[DisplayServer::AccessibilityAction::ACTION_EXPAND] = ACCESSKIT_ACTION_EXPAND;
action_map[DisplayServer::AccessibilityAction::ACTION_DECREMENT] = ACCESSKIT_ACTION_DECREMENT;
action_map[DisplayServer::AccessibilityAction::ACTION_INCREMENT] = ACCESSKIT_ACTION_INCREMENT;
action_map[DisplayServer::AccessibilityAction::ACTION_HIDE_TOOLTIP] = ACCESSKIT_ACTION_HIDE_TOOLTIP;
action_map[DisplayServer::AccessibilityAction::ACTION_SHOW_TOOLTIP] = ACCESSKIT_ACTION_SHOW_TOOLTIP;
//action_map[DisplayServer::AccessibilityAction::ACTION_INVALIDATE_TREE] = ACCESSKIT_ACTION_INVALIDATE_TREE;
//action_map[DisplayServer::AccessibilityAction::ACTION_LOAD_INLINE_TEXT_BOXES] = ACCESSKIT_ACTION_LOAD_INLINE_TEXT_BOXES;
action_map[DisplayServer::AccessibilityAction::ACTION_SET_TEXT_SELECTION] = ACCESSKIT_ACTION_SET_TEXT_SELECTION;
action_map[DisplayServer::AccessibilityAction::ACTION_REPLACE_SELECTED_TEXT] = ACCESSKIT_ACTION_REPLACE_SELECTED_TEXT;
action_map[DisplayServer::AccessibilityAction::ACTION_SCROLL_BACKWARD] = ACCESSKIT_ACTION_SCROLL_UP;
action_map[DisplayServer::AccessibilityAction::ACTION_SCROLL_DOWN] = ACCESSKIT_ACTION_SCROLL_DOWN;
|
random
|
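The two tables above are straight one-to-one translations: role_map converts Godot's DisplayServer::AccessibilityRole values into AccessKit roles, and action_map converts DisplayServer::AccessibilityAction values into AccessKit actions, so a driver-side lookup is a single indexed read, e.g. (illustrative only):
// accesskit_role role = role_map[p_role];
// accesskit_action action = action_map[p_action];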
<|fim_prefix|>// flagged by data_size = 0.
int TessBaseAPI::Init(const char *data, int data_size, const char *language, OcrEngineMode oem,
char **configs, int configs_size, const std::vector<std::string> *vars_vec,
const std::vector<std::string> *vars_values, bool set_only_non_debug_params,
FileReader reader) {
if (language == nullptr) {
language = "";
}
if (data == nullptr) {
data = "";
}
std::string datapath = data_size == 0 ? data : language;
// If the datapath, OcrEngineMode or the language have changed - start again.
// Note that the language_ field stores the last requested language that was
// initialized successfully, while tesseract_->lang stores the language
// actually used. They differ only if the requested language was nullptr, in
// which case tesseract_->lang is set to the Tesseract default ("eng").
if (tesseract_ != nullptr &&
(datapath_.empty() || language_.empty() || datapath_ != datapath ||
last_oem_requested_ != oem || (language_ != language && tesseract_->lang != language))) {
delete tesseract_;
tesseract_ = nullptr;
}
bool reset_classifier = true;
if (tesseract_ == nullptr) {
reset_classifier = false;
tesseract_ = new Tesseract;
if (reader != nullptr) {
reader_ = reader;
}
TessdataManager mgr(reader_);
if (data_size != 0) {
mgr.LoadMemBuffer(language, data, data_size);
}
if (tesseract_->init_tesseract(datapath, output_file_, language, oem, configs,
configs_size, vars_vec, vars_values, set_only_non_debug_params,
&mgr) != 0) {
return -1;
}
}
// Update datapath and language requested for the last valid initialization.
datapath_ = std::move(datapath);
if (datapath_.empty() && !tesseract_->datadir.empty()) {
datapath_ = tesseract_->datadir;
}
language_ = language;<|fim_suffix|> }
#endif // ndef DISABLED_LEGACY_ENGINE
return 0;
}
/**
* Returns the languages string used in the last valid initialization.
* If the last initialization specified "deu+hin" then that will be
* returned. If hin loaded eng automatically as well, then that will
* not be included in this list. To find the languages actually
* loaded use GetLoadedLanguagesAsVector.
* The returned string should NOT be deleted.
*/
const char *TessBaseAPI::GetInitLanguagesAsString() const {
return language_.c_str();
}
/**
* Returns the loaded languages in the vector of std::string.
* Includes all languages loaded by the last Init, including those loaded
* as dependencies of other loaded languages.
*/
void TessBaseAPI::GetLoadedLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
langs->push_back(tesseract_->lang);
int num_subs = tesseract_->num_sub_langs();
for (int i = 0; i < num_subs; ++i) {
langs->push_back(tesseract_->get_sub_lang(i)->lang);
}
}
}
/**
* Returns the available languages in the sorted vector of std::string.
*/
void TessBaseAPI::GetAvailableLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
addAvailableLanguages(tesseract_->datadir, langs);
std::sort(langs->begin(), langs->end());
}
}
/**
* Init only for page layout analysis. Use only for calls to SetImage and
* AnalysePage. Calls that attempt recognition will generate an error.
*/
void TessBaseAPI::InitForAnalysePage() {
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
#ifndef DISABLED_LEGACY_ENGINE
tesseract_->InitAdaptiveClassifier(nullptr);
#endif
}
}
/**
* Read a "config" file containing a set of parameter name, value pairs.
* Searches the standard places: tessdata/configs, tessdata/tessconfigs
* and also accepts a relative or absolute path name.
*/
void TessBaseAPI::ReadConfigFile(const char *filename) {<|fim_middle|> last_oem_requested_ = oem;
#ifndef DISABLED_LEGACY_ENGINE
// For same language and datapath, just reset the adaptive classifier.
if (reset_classifier) {
tesseract_->ResetAdaptiveClassifier();
|
// flagged by data_size = 0.
int TessBaseAPI::Init(const char *data, int data_size, const char *language, OcrEngineMode oem,
char **configs, int configs_size, const std::vector<std::string> *vars_vec,
const std::vector<std::string> *vars_values, bool set_only_non_debug_params,
FileReader reader) {
if (language == nullptr) {
language = "";
}
if (data == nullptr) {
data = "";
}
std::string datapath = data_size == 0 ? data : language;
// If the datapath, OcrEngineMode or the language have changed - start again.
// Note that the language_ field stores the last requested language that was
// initialized successfully, while tesseract_->lang stores the language
// actually used. They differ only if the requested language was nullptr, in
// which case tesseract_->lang is set to the Tesseract default ("eng").
if (tesseract_ != nullptr &&
(datapath_.empty() || language_.empty() || datapath_ != datapath ||
last_oem_requested_ != oem || (language_ != language && tesseract_->lang != language))) {
delete tesseract_;
tesseract_ = nullptr;
}
bool reset_classifier = true;
if (tesseract_ == nullptr) {
reset_classifier = false;
tesseract_ = new Tesseract;
if (reader != nullptr) {
reader_ = reader;
}
TessdataManager mgr(reader_);
if (data_size != 0) {
mgr.LoadMemBuffer(language, data, data_size);
}
if (tesseract_->init_tesseract(datapath, output_file_, language, oem, configs,
configs_size, vars_vec, vars_values, set_only_non_debug_params,
&mgr) != 0) {
return -1;
}
}
// Update datapath and language requested for the last valid initialization.
datapath_ = std::move(datapath);
if (datapath_.empty() && !tesseract_->datadir.empty()) {
datapath_ = tesseract_->datadir;
}
language_ = language;
|
last_oem_requested_ = oem;
#ifndef DISABLED_LEGACY_ENGINE
// For same language and datapath, just reset the adaptive classifier.
if (reset_classifier) {
tesseract_->ResetAdaptiveClassifier();
|
}
#endif // ndef DISABLED_LEGACY_ENGINE
return 0;
}
/**
* Returns the languages string used in the last valid initialization.
* If the last initialization specified "deu+hin" then that will be
* returned. If hin loaded eng automatically as well, then that will
* not be included in this list. To find the languages actually
* loaded use GetLoadedLanguagesAsVector.
* The returned string should NOT be deleted.
*/
const char *TessBaseAPI::GetInitLanguagesAsString() const {
return language_.c_str();
}
/**
* Returns the loaded languages in the vector of std::string.
* Includes all languages loaded by the last Init, including those loaded
* as dependencies of other loaded languages.
*/
void TessBaseAPI::GetLoadedLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
langs->push_back(tesseract_->lang);
int num_subs = tesseract_->num_sub_langs();
for (int i = 0; i < num_subs; ++i) {
langs->push_back(tesseract_->get_sub_lang(i)->lang);
}
}
}
/**
* Returns the available languages in the sorted vector of std::string.
*/
void TessBaseAPI::GetAvailableLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
addAvailableLanguages(tesseract_->datadir, langs);
std::sort(langs->begin(), langs->end());
}
}
/**
* Init only for page layout analysis. Use only for calls to SetImage and
* AnalysePage. Calls that attempt recognition will generate an error.
*/
void TessBaseAPI::InitForAnalysePage() {
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
#ifndef DISABLED_LEGACY_ENGINE
tesseract_->InitAdaptiveClassifier(nullptr);
#endif
}
}
/**
* Read a "config" file containing a set of parameter name, value pairs.
* Searches the standard places: tessdata/configs, tessdata/tessconfigs
* and also accepts a relative or absolute path name.
*/
void TessBaseAPI::ReadConfigFile(const char *filename) {
|
random
|
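A caller-side sketch for the in-memory Init() overload defined above; the traineddata path is hypothetical and error handling is reduced to the return code. Passing a non-zero data_size selects the load-from-memory path, while data_size == 0 makes Init() treat data as a datapath instead:
#include <tesseract/baseapi.h>
#include <fstream>
#include <iterator>
#include <string>

int init_from_buffer(tesseract::TessBaseAPI &api) {
    std::ifstream f("/path/to/eng.traineddata", std::ios::binary);
    std::string blob((std::istreambuf_iterator<char>(f)), std::istreambuf_iterator<char>());
    return api.Init(blob.data(), (int)blob.size(), "eng", tesseract::OEM_LSTM_ONLY,
                    nullptr, 0, nullptr, nullptr, false, nullptr);
}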
<|fim_prefix|>/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/input/input_event.h"
#include "core/math/rect2.h"
#include "core/os/memory.h"
#include "core/variant/array.h"
#include "tests/test_macros.h"
namespace TestInputEvent {
TEST_CASE("[InputEvent] Signal is emitted when device is changed") {
Ref<InputEventKey> input_event;
input_event.instantiate();
SIGNAL_WATCH(*input_event, CoreStringName(changed));
Array empty_args = { {} };
input_event->set_device(1);
SIGNAL_CHECK("changed", empty_args);
CHECK(input_event->get_device() == 1);
SIGNAL_UNWATCH(*input_event, CoreStringName(changed));
}
TEST_CASE("[InputEvent] Test accumulate") {
Ref<InputEventMouseMotion> iemm1, iemm2;
Ref<InputEventKey> iek;
iemm1.instantiate(), iemm2.instantiate();
iek.instantiate();
iemm1->set_button_mask(MouseButtonMask::LEFT);
CHECK_FALSE(iemm1->accumulate(iemm2));
iemm2->set_button_mask(MouseButtonMask::LEFT);
CHECK(iemm1->accumulate(iemm2));
CHECK_FALSE(iemm1->accumulate(iek));
CHECK_FALSE(iemm2->accumulate(iek));
}
TEST_CASE("[InputEvent][SceneTree] Test methods that interact with the InputMap") {
const String mock_action = "mock_action";<|fim_suffix|>
CHECK(iejm->is_action_released(mock_action));
CHECK(Math::is_equal_approx(iejm->get_action_strength(mock_action), 0.0f));
iejm->set_axis_value(0.8f);
// Since the deadzone is 0.5, action_strength grows linearly from 0.0 to 1.0 as the raw value goes from 0.5 to 1.0.
CHECK(Math::is_equal_approx(iejm->get_action_strength(mock_action), 0.6f));
CHECK(Math::is_equal_approx(iejm->get_action_raw_strength(mock_action), 0.8f));
CHECK(iejm->is_action_pressed(mock_action));
InputMap::get_singleton()->erase_action(mock_action);
}
TEST_CASE("[InputEvent] Test xformed_by") {
Ref<InputEventMouseMotion> iemm1;
iemm1.instantiate();
iemm1->set_position(Vector2(0.0f, 0.0f));
Transform2D transform;
transform = transform.translated(Vector2(2.0f, 3.0f));
Ref<InputEventMouseMotion> iemm2 = iemm1->xformed_by(transform);
CHECK(iemm2->get_position().is_equal_approx(Vector2(2.0f, 3.0f)));
}
} // namespace TestInputEvent
<|fim_middle|> Ref<InputEventJoypadMotion> iejm;
iejm.instantiate();
InputMap::get_singleton()->add_action(mock_action, 0.5);
InputMap::get_singleton()->action_add_event(mock_action, iejm);
CHECK(iejm->is_action_type());
CHECK(iejm->is_action(mock_action));
|
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/input/input_event.h"
#include "core/math/rect2.h"
#include "core/os/memory.h"
#include "core/variant/array.h"
#include "tests/test_macros.h"
namespace TestInputEvent {
TEST_CASE("[InputEvent] Signal is emitted when device is changed") {
Ref<InputEventKey> input_event;
input_event.instantiate();
SIGNAL_WATCH(*input_event, CoreStringName(changed));
Array empty_args = { {} };
input_event->set_device(1);
SIGNAL_CHECK("changed", empty_args);
CHECK(input_event->get_device() == 1);
SIGNAL_UNWATCH(*input_event, CoreStringName(changed));
}
TEST_CASE("[InputEvent] Test accumulate") {
Ref<InputEventMouseMotion> iemm1, iemm2;
Ref<InputEventKey> iek;
iemm1.instantiate(), iemm2.instantiate();
iek.instantiate();
iemm1->set_button_mask(MouseButtonMask::LEFT);
CHECK_FALSE(iemm1->accumulate(iemm2));
iemm2->set_button_mask(MouseButtonMask::LEFT);
CHECK(iemm1->accumulate(iemm2));
CHECK_FALSE(iemm1->accumulate(iek));
CHECK_FALSE(iemm2->accumulate(iek));
}
TEST_CASE("[InputEvent][SceneTree] Test methods that interact with the InputMap") {
const String mock_action = "mock_action";
|
Ref<InputEventJoypadMotion> iejm;
iejm.instantiate();
InputMap::get_singleton()->add_action(mock_action, 0.5);
InputMap::get_singleton()->action_add_event(mock_action, iejm);
CHECK(iejm->is_action_type());
CHECK(iejm->is_action(mock_action));
|
CHECK(iejm->is_action_released(mock_action));
CHECK(Math::is_equal_approx(iejm->get_action_strength(mock_action), 0.0f));
iejm->set_axis_value(0.8f);
// Since deadzone is 0.5, action_strength grows linearly from 0.5 to 1.0.
CHECK(Math::is_equal_approx(iejm->get_action_strength(mock_action), 0.6f));
CHECK(Math::is_equal_approx(iejm->get_action_raw_strength(mock_action), 0.8f));
CHECK(iejm->is_action_pressed(mock_action));
InputMap::get_singleton()->erase_action(mock_action);
}
TEST_CASE("[InputEvent] Test xformed_by") {
Ref<InputEventMouseMotion> iemm1;
iemm1.instantiate();
iemm1->set_position(Vector2(0.0f, 0.0f));
Transform2D transform;
transform = transform.translated(Vector2(2.0f, 3.0f));
Ref<InputEventMouseMotion> iemm2 = iemm1->xformed_by(transform);
CHECK(iemm2->get_position().is_equal_approx(Vector2(2.0f, 3.0f)));
}
} // namespace TestInputEvent
|
random
|
<|fim_prefix|>= true;
} break;
case Image::FORMAT_ASTC_8x8: {
block_x = 8;
block_y = 8;
is_hdr = false;
} break;
case Image::FORMAT_ASTC_8x8_HDR: {
block_x = 8;
block_y = 8;
is_hdr = true;
} break;
default: {
ERR_FAIL_MSG(vformat("astcenc: Cannot decompress Image with a non-ASTC format: %s.", Image::get_format_name(src_format)));
} break;
}
// Initialize astcenc.
const astcenc_profile profile = is_hdr ? ASTCENC_PRF_HDR : ASTCENC_PRF_LDR;
astcenc_config config;
const float quality = ASTCENC_PRE_MEDIUM;
const uint32_t flags = ASTCENC_FLG_DECOMPRESS_ONLY;
astcenc_error status = astcenc_config_init(profile, block_x, block_y, 1, quality, flags, &config);
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Configuration initialization failed: %s.", astcenc_get_error_string(status)));
// Context allocation.
astcenc_context *context = nullptr;
const unsigned int thread_count = 1;
status = astcenc_context_alloc(&config, thread_count, &context);
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Context allocation failed: %s.", astcenc_get_error_string(status)));
const Image::Format target_format = is_hdr ? Image::FORMAT_RGBAH : Image::FORMAT_RGBA8;
const bool has_mipmaps = r_img->has_mipmaps();
int width = r_img->get_width();
int height = r_img->get_height();
const int64_t dest_size = Image::get_image_data_size(width, height, target_format, has_mipmaps);
Vector<uint8_t> dest_data;
dest_data.resize(dest_size);
uint8_t *dest_write = dest_data.ptrw();
// Decompress image.
const int mip_count = has_mipmaps ? Image::get_image_required_mipmaps(width, height, target_format) : 0;
const uint8_t *src_data = r_img->ptr();
for (int i = 0; i < mip_count + 1; i++) {
const int64_t src_ofs = Image::get_image_mipmap_offset(width, height, src_format, i);
const uint8_t *mip_data = &src_data[src_ofs];
int64_t src_size;
if (i == mip_count) {
src_size = r_img->get_data_size() - src_ofs;
} else {
<|fim_suffix|>
}
int dst_mip_w, dst_mip_h;
const int64_t dst_ofs = Image::get_image_mipmap_offset_and_dimensions(width, height, target_format, i, dst_mip_w, dst_mip_h);
// Ensure that mip offset is a multiple of 8 (etcpak expects uint64_t pointer).
ERR_FAIL_COND(dst_ofs % 8 != 0);
uint8_t *dest_mip_write = &dest_write[dst_ofs];
astcenc_image image;
image.dim_x = dst_mip_w;
image.dim_y = dst_mip_h;
image.dim_z = 1;
image.data_type = is_hdr ? ASTCENC_TYPE_F16 : ASTCENC_TYPE_U8;
image.data = (void **)(&dest_mip_write);
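 // astcenc treats data as an array of per-slice pointers; with dim_z == 1 the address of the single mip pointer is enough.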
const astcenc_swizzle swizzle = {
ASTCENC_SWZ_R, ASTCENC_SWZ_G, ASTCENC_SWZ_B, ASTCENC_SWZ_A
};
status = astcenc_decompress_image(context, mip_data, src_size, &image, &swizzle, 0);
ERR_BREAK_MSG(status != ASTCENC_SUCCESS, vformat("astcenc: ASTC decompression failed: %s.", astcenc_get_error_string(status)));
ERR_BREAK_MSG(image.dim_z > 1, "astcenc: ASTC decompression failed because this is a 3D texture, which is not supported.");
astcenc_compress_reset(context);
}
astcenc_context_free(context);
 // Replace the original image with the decompressed one.
r_img->set_data(width, height, has_mipmaps, target_format, dest_data);
print_verbose(vformat("astcenc: Decompression took %d ms.", OS::get_singleton()->get_ticks_msec() - start_time));
}
<|fim_middle|>src_size = Image::get_image_mipmap_offset(width, height, src_format, i + 1) - src_ofs;
|
= true;
} break;
case Image::FORMAT_ASTC_8x8: {
block_x = 8;
block_y = 8;
is_hdr = false;
} break;
case Image::FORMAT_ASTC_8x8_HDR: {
block_x = 8;
block_y = 8;
is_hdr = true;
} break;
default: {
ERR_FAIL_MSG(vformat("astcenc: Cannot decompress Image with a non-ASTC format: %s.", Image::get_format_name(src_format)));
} break;
}
// Initialize astcenc.
const astcenc_profile profile = is_hdr ? ASTCENC_PRF_HDR : ASTCENC_PRF_LDR;
astcenc_config config;
const float quality = ASTCENC_PRE_MEDIUM;
const uint32_t flags = ASTCENC_FLG_DECOMPRESS_ONLY;
astcenc_error status = astcenc_config_init(profile, block_x, block_y, 1, quality, flags, &config);
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Configuration initialization failed: %s.", astcenc_get_error_string(status)));
// Context allocation.
astcenc_context *context = nullptr;
const unsigned int thread_count = 1;
status = astcenc_context_alloc(&config, thread_count, &context);
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Context allocation failed: %s.", astcenc_get_error_string(status)));
const Image::Format target_format = is_hdr ? Image::FORMAT_RGBAH : Image::FORMAT_RGBA8;
const bool has_mipmaps = r_img->has_mipmaps();
int width = r_img->get_width();
int height = r_img->get_height();
const int64_t dest_size = Image::get_image_data_size(width, height, target_format, has_mipmaps);
Vector<uint8_t> dest_data;
dest_data.resize(dest_size);
uint8_t *dest_write = dest_data.ptrw();
// Decompress image.
const int mip_count = has_mipmaps ? Image::get_image_required_mipmaps(width, height, target_format) : 0;
const uint8_t *src_data = r_img->ptr();
for (int i = 0; i < mip_count + 1; i++) {
const int64_t src_ofs = Image::get_image_mipmap_offset(width, height, src_format, i);
const uint8_t *mip_data = &src_data[src_ofs];
int64_t src_size;
if (i == mip_count) {
src_size = r_img->get_data_size() - src_ofs;
} else {
|
src_size = Image::get_image_mipmap_offset(width, height, src_format, i + 1) - src_ofs;
|
}
int dst_mip_w, dst_mip_h;
const int64_t dst_ofs = Image::get_image_mipmap_offset_and_dimensions(width, height, target_format, i, dst_mip_w, dst_mip_h);
// Ensure that mip offset is a multiple of 8 (etcpak expects uint64_t pointer).
ERR_FAIL_COND(dst_ofs % 8 != 0);
uint8_t *dest_mip_write = &dest_write[dst_ofs];
astcenc_image image;
image.dim_x = dst_mip_w;
image.dim_y = dst_mip_h;
image.dim_z = 1;
image.data_type = is_hdr ? ASTCENC_TYPE_F16 : ASTCENC_TYPE_U8;
image.data = (void **)(&dest_mip_write);
const astcenc_swizzle swizzle = {
ASTCENC_SWZ_R, ASTCENC_SWZ_G, ASTCENC_SWZ_B, ASTCENC_SWZ_A
};
status = astcenc_decompress_image(context, mip_data, src_size, &image, &swizzle, 0);
ERR_BREAK_MSG(status != ASTCENC_SUCCESS, vformat("astcenc: ASTC decompression failed: %s.", astcenc_get_error_string(status)));
ERR_BREAK_MSG(image.dim_z > 1, "astcenc: ASTC decompression failed because this is a 3D texture, which is not supported.");
astcenc_compress_reset(context);
}
astcenc_context_free(context);
 // Replace the original image with the decompressed one.
r_img->set_data(width, height, has_mipmaps, target_format, dest_data);
print_verbose(vformat("astcenc: Decompression took %d ms.", OS::get_singleton()->get_ticks_msec() - start_time));
}
|
ast_based
|
<|fim_prefix|>mespace
void Channel::NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
grpc::CompletionQueue* cq, void* tag) {
TagSaver* tag_saver = new TagSaver(tag);
grpc_channel_watch_connectivity_state(c_channel_, last_observed, deadline,
cq->cq(), tag_saver);
}
bool Channel::WaitForStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline) {
grpc::CompletionQueue cq;
bool ok = false;
void* tag = nullptr;
NotifyOnStateChangeImpl(last_observed, deadline, &cq, nullptr);
cq.Next(&tag, &ok);
GRPC_CHECK_EQ(tag, nullptr);
return ok;
}
namespace {
class ShutdownCallback : public grpc_completion_queue_functor {
public:
ShutdownCallback() {
functor_run = &ShutdownCallback::Run;
// Set inlineable to true since this callback is trivial and thus does not
// need to be run from the EventEngine (potentially triggering a thread
// hop). This should only be used by internal callbacks like this and not by
// user application code.
inlineable = true;
}
 // TakeCQ transfers ownership of the cq to the shutdown callback,
 // which then becomes responsible for destroying it
void TakeCQ(grpc::CompletionQueue* cq) { cq_ = cq; }
// The Run function will get invoked by the completion queue library
// when the shutdown is actually complete
static void Run(grpc_completion_queue_functor* cb, int) {
auto* callback = static_cast<ShutdownCallback*>(cb);
delete callback->cq_;
delete callback;
}
private:
grpc::CompletionQueue* cq_ = nullptr;
};
} // namespace
::grpc::CompletionQueue* Channel::CallbackCQ() {
// TODO(vjpai): Consider using a single global CQ for the default CQ
// if there is no explicit per-channel CQ registered
CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_acquire);
<|fim_suffix|>
// The callback_cq_ wasn't already set, so grab a lock and set it up exactly
// once for this channel.
grpc::internal::MutexLock l(&mu_);
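 // Double-checked locking: re-read under the mutex in case another thread created the CQ first; the relaxed load is fine here because the mutex already provides the required ordering.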
callback_cq = callback_cq_.load(std::memory_order_relaxed);
if (callback_cq == nullptr) {
if (grpc_iomgr_run_in_background()) {
// gRPC-core provides the backing needed for the preferred CQ type
auto* shutdown_callback = new ShutdownCallback;
callback_cq = new grpc::CompletionQueue(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
shutdown_callback});
// Transfer ownership of the new cq to its own shutdown callback
shutdown_callback->TakeCQ(callback_cq);
} else {
// Otherwise we need to use the alternative CQ variant
callback_cq = CompletionQueue::CallbackAlternativeCQ();
}
callback_cq_.store(callback_cq, std::memory_order_release);
}
return callback_cq;
}
} // namespace grpc
<|fim_middle|>if (callback_cq != nullptr) {
return callback_cq;
}
|
mespace
void Channel::NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
grpc::CompletionQueue* cq, void* tag) {
TagSaver* tag_saver = new TagSaver(tag);
grpc_channel_watch_connectivity_state(c_channel_, last_observed, deadline,
cq->cq(), tag_saver);
}
bool Channel::WaitForStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline) {
grpc::CompletionQueue cq;
bool ok = false;
void* tag = nullptr;
NotifyOnStateChangeImpl(last_observed, deadline, &cq, nullptr);
cq.Next(&tag, &ok);
GRPC_CHECK_EQ(tag, nullptr);
return ok;
}
namespace {
class ShutdownCallback : public grpc_completion_queue_functor {
public:
ShutdownCallback() {
functor_run = &ShutdownCallback::Run;
// Set inlineable to true since this callback is trivial and thus does not
// need to be run from the EventEngine (potentially triggering a thread
// hop). This should only be used by internal callbacks like this and not by
// user application code.
inlineable = true;
}
 // TakeCQ transfers ownership of the cq to the shutdown callback,
 // which then becomes responsible for destroying it
void TakeCQ(grpc::CompletionQueue* cq) { cq_ = cq; }
// The Run function will get invoked by the completion queue library
// when the shutdown is actually complete
static void Run(grpc_completion_queue_functor* cb, int) {
auto* callback = static_cast<ShutdownCallback*>(cb);
delete callback->cq_;
delete callback;
}
private:
grpc::CompletionQueue* cq_ = nullptr;
};
} // namespace
::grpc::CompletionQueue* Channel::CallbackCQ() {
// TODO(vjpai): Consider using a single global CQ for the default CQ
// if there is no explicit per-channel CQ registered
CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_acquire);
|
if (callback_cq != nullptr) {
return callback_cq;
}
|
// The callback_cq_ wasn't already set, so grab a lock and set it up exactly
// once for this channel.
grpc::internal::MutexLock l(&mu_);
callback_cq = callback_cq_.load(std::memory_order_relaxed);
if (callback_cq == nullptr) {
if (grpc_iomgr_run_in_background()) {
// gRPC-core provides the backing needed for the preferred CQ type
auto* shutdown_callback = new ShutdownCallback;
callback_cq = new grpc::CompletionQueue(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
shutdown_callback});
// Transfer ownership of the new cq to its own shutdown callback
shutdown_callback->TakeCQ(callback_cq);
} else {
// Otherwise we need to use the alternative CQ variant
callback_cq = CompletionQueue::CallbackAlternativeCQ();
}
callback_cq_.store(callback_cq, std::memory_order_release);
}
return callback_cq;
}
} // namespace grpc
|
ast_based
|
<|fim_prefix|>// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#ifndef CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
#define CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
#include <type_traits>
#include "common/check.h"
#include "common/set.h"
#include "llvm/Support/Allocator.h"
#include "toolchain/base/mem_usage.h"
#include "toolchain/base/value_store.h"
#include "toolchain/base/yaml.h"
namespace Carbon::SemIR {
// Provides a block-based ValueStore, which uses slab allocation of added
// blocks. This allows references to values to outlast vector resizes that might
// otherwise invalidate references.
//
// BlockValueStore is used as-is, but there are also children that expose the
// protected members for type-specific functionality.
template <typename IdT, typename ElementT>
class BlockValueStore : public Yaml::Printable<BlockValueStore<IdT, ElementT>> {
public:
using IdType = IdT;
using ElementType = ElementT;
using RefType = llvm::MutableArrayRef<ElementT>;
using ConstRefType = llvm::ArrayRef<ElementT>;
explicit BlockValueStore(llvm::BumpPtrAllocator& allocator)
: allocator_(&allocator) {
auto empty = RefType();
<|fim_suffix|>
CARBON_CHECK(empty_val.key() == IdT::Empty);
}
// Adds a block with the given content, returning an ID to reference it.
auto Add(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
return values_.Add(AllocateCopy(content));
}
// Returns the requested block.
auto Get(IdT id) const -> ConstRefType { return values_.Get(id); }
// Returns a mutable view of the requested block. This operation should be
// avoided where possible; we generally want blocks to be immutable once
// created.
auto GetMutable(IdT id) -> RefType { return values_.Get(id); }
// Returns a new block formed by applying `transform(elem_id)` to each element
// in the specified block.
template <typename TransformFnT>
auto Transform(IdT id, TransformFnT transform) -> IdT {
llvm::SmallVector<ElementType> block(llvm::map_range(Get(id), transform));
return Add(block);
}
// Adds a block or finds an existing canonical block with the given content,
// and returns an ID to reference it.
auto AddCanonical(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
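 // Insert appears to run the Add callback only when no equal block is stored yet and to return the existing entry on a hit, so equal content maps to a single canonical id.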
auto result = canonical_blocks_.Insert(
content, [&] { return Add(content); }, KeyContext(this));
return result.key();
}
// Promotes an existing block ID to a canonical block ID, or returns an
// existing canonical block ID if the block was already added. The specified
// block must not be modified after this point.
auto MakeCanonical(IdT id) -> IdT {
// Get the content first so that we don't have unnecessary translation of
// the `id` into the content during insertion.
auto result = canonical_blocks_.Insert(
Get(id), [id] { return id; }, KeyContext(this));
return result.key();
}
auto OutputYaml() const -> Yaml::OutputMapping {
return Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
for (auto [block_id, block] : values_.enumerate()) {
map.Add(PrintToStr<|fim_middle|>auto empty_val = canonical_blocks_.Insert(
empty, [&] { return values_.Add(empty); }, KeyContext(this));
|
// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#ifndef CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
#define CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
#include <type_traits>
#include "common/check.h"
#include "common/set.h"
#include "llvm/Support/Allocator.h"
#include "toolchain/base/mem_usage.h"
#include "toolchain/base/value_store.h"
#include "toolchain/base/yaml.h"
namespace Carbon::SemIR {
// Provides a block-based ValueStore, which uses slab allocation of added
// blocks. This allows references to values to outlast vector resizes that might
// otherwise invalidate references.
//
// BlockValueStore is used as-is, but there are also children that expose the
// protected members for type-specific functionality.
template <typename IdT, typename ElementT>
class BlockValueStore : public Yaml::Printable<BlockValueStore<IdT, ElementT>> {
public:
using IdType = IdT;
using ElementType = ElementT;
using RefType = llvm::MutableArrayRef<ElementT>;
using ConstRefType = llvm::ArrayRef<ElementT>;
explicit BlockValueStore(llvm::BumpPtrAllocator& allocator)
: allocator_(&allocator) {
auto empty = RefType();
|
auto empty_val = canonical_blocks_.Insert(
empty, [&] { return values_.Add(empty); }, KeyContext(this));
|
CARBON_CHECK(empty_val.key() == IdT::Empty);
}
// Adds a block with the given content, returning an ID to reference it.
auto Add(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
return values_.Add(AllocateCopy(content));
}
// Returns the requested block.
auto Get(IdT id) const -> ConstRefType { return values_.Get(id); }
// Returns a mutable view of the requested block. This operation should be
// avoided where possible; we generally want blocks to be immutable once
// created.
auto GetMutable(IdT id) -> RefType { return values_.Get(id); }
// Returns a new block formed by applying `transform(elem_id)` to each element
// in the specified block.
template <typename TransformFnT>
auto Transform(IdT id, TransformFnT transform) -> IdT {
llvm::SmallVector<ElementType> block(llvm::map_range(Get(id), transform));
return Add(block);
}
// Adds a block or finds an existing canonical block with the given content,
// and returns an ID to reference it.
auto AddCanonical(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
auto result = canonical_blocks_.Insert(
content, [&] { return Add(content); }, KeyContext(this));
return result.key();
}
// Promotes an existing block ID to a canonical block ID, or returns an
// existing canonical block ID if the block was already added. The specified
// block must not be modified after this point.
auto MakeCanonical(IdT id) -> IdT {
// Get the content first so that we don't have unnecessary translation of
// the `id` into the content during insertion.
auto result = canonical_blocks_.Insert(
Get(id), [id] { return id; }, KeyContext(this));
return result.key();
}
auto OutputYaml() const -> Yaml::OutputMapping {
return Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
for (auto [block_id, block] : values_.enumerate()) {
map.Add(PrintToStr
|
ast_based
|
<|fim_prefix|>ama_get_model(ctx);
const llama_vocab * vocab = llama_model_get_vocab(model);
const bool add_bos = llama_vocab_get_add_bos(vocab);
tokens_pos = common_tokenize(ctx, pos, add_bos, true);
tokens_neg = common_tokenize(ctx, neg, add_bos, true);
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
padding_seq(ctx, tokens_pos, max_seq_len);
padding_seq(ctx, tokens_neg, max_seq_len);
}
void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
// TODO: customize padding token
std::vector<llama_token> pad_tokens = common_tokenize(ctx, " ", false);
llama_token pad_tok = pad_tokens.back();
while (tokens.size() < len) {
tokens.push_back(pad_tok);
}
}
};
//////////////////////////////////////////////////
template <typename T>
static std::string to_string(const T & val) {
std::stringstream ss;
ss << val;
return ss.str();
}
static std::vector<std::string> ctrlvec_load_prompt_file(std::string path, bool skip_empty_lines) {
std::vector<std::string> output;
std::ifstream file(path);
if (!file.is_open()) {
fprintf(stderr, "error: unable to open file: %s\n", path.c_str());
exit(1);
}
std::string line;
while (std::getline(file, line)) {
bool is_skip = skip_empty_lines && line.empty();
if (!is_skip) {
string_process_escapes(line);
output.push_back(line);
}
}
file.close();
return output;
}
//////////////////////////////////////////////////
static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
auto * cb_data = (callback_data *) user_data;
static const char * l_out_name = "l_out";
const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;
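 // The scheduler invokes this callback twice per tensor: with ask == true it queries whether we
 // want to observe the tensor (we only care about l_out), and with ask == false the data is ready.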
if (ask) {
return is_l_out;
}
if (!is_l_out || t->ne[1] != cb_data->n_tokens) {
return true;
}
<|fim_suffix|>
}
static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
llama_memory_clear(llama_get_memory(ctx), true);
if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
fprintf(stderr, "%s : failed to eval\n", __func__);
return false;
}
return true;
}
static void export_gguf(const std::vector<struct ggml_tensor *> & v_ctrl, const std::string fname, const std::string model_hint) {
struct gguf_context * ctx = gguf_init_empty();
const std::string arch = "controlvector";
gguf_set_val_str(ctx, "general.architecture", arch.c_str());
gguf_set_val_str(ctx, (arch + ".model_hint").c_str(), model_hint.c_str());
gguf_set_val_i32(ctx, (arch + ".layer_count").c_str(), v_ctrl.size());
for (size_t i = 0; i < v_ctrl.size(); ++i) {
gguf_add_tensor(ctx, v_ctrl[i]);
print_debug_tensor(v_ctrl[i]);
printf("Added tensor: %s\n", v_ctrl[i]->name);
}
printf("%s: writing file...\n", __func__);
gguf_write_to_file(ctx, fname.c_str(), false);
printf("%s: wrote file '%s'\n", __func__, fname.c_str());
gguf_free(ctx);
}
/**
 * Load the positive and negative prompt files.
 * Then pair them up line by line to form the training entries (the counts must match).
 */
static int prepare_entries(common_params & params, train_context & ctx_train) {
// load prompts
std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
if (positive_prompts.size() != negative_prompts.size()) {
fprintf(stderr, "number of positive and negative prompts must be equal\n");
return 1;
}
if (positive_prompts.empty()) {
fprintf(stderr, "must provide at least one prompt pair\n");
return 1;
}
ctx_train.positive_entries = positive_prompts;
ctx_train.negative_entries = negative_prompts;
retu<|fim_middle|>// save the tensor to current context
cb_data->save_tensor_for_layer(t);
return true;
|
ama_get_model(ctx);
const llama_vocab * vocab = llama_model_get_vocab(model);
const bool add_bos = llama_vocab_get_add_bos(vocab);
tokens_pos = common_tokenize(ctx, pos, add_bos, true);
tokens_neg = common_tokenize(ctx, neg, add_bos, true);
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
padding_seq(ctx, tokens_pos, max_seq_len);
padding_seq(ctx, tokens_neg, max_seq_len);
}
void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
// TODO: customize padding token
std::vector<llama_token> pad_tokens = common_tokenize(ctx, " ", false);
llama_token pad_tok = pad_tokens.back();
while (tokens.size() < len) {
tokens.push_back(pad_tok);
}
}
};
//////////////////////////////////////////////////
template <typename T>
static std::string to_string(const T & val) {
std::stringstream ss;
ss << val;
return ss.str();
}
static std::vector<std::string> ctrlvec_load_prompt_file(std::string path, bool skip_empty_lines) {
std::vector<std::string> output;
std::ifstream file(path);
if (!file.is_open()) {
fprintf(stderr, "error: unable to open file: %s\n", path.c_str());
exit(1);
}
std::string line;
while (std::getline(file, line)) {
bool is_skip = skip_empty_lines && line.empty();
if (!is_skip) {
string_process_escapes(line);
output.push_back(line);
}
}
file.close();
return output;
}
//////////////////////////////////////////////////
static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
auto * cb_data = (callback_data *) user_data;
static const char * l_out_name = "l_out";
const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;
if (ask) {
return is_l_out;
}
if (!is_l_out || t->ne[1] != cb_data->n_tokens) {
return true;
}
|
// save the tensor to current context
cb_data->save_tensor_for_layer(t);
return true;
|
}
static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
llama_memory_clear(llama_get_memory(ctx), true);
if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
fprintf(stderr, "%s : failed to eval\n", __func__);
return false;
}
return true;
}
static void export_gguf(const std::vector<struct ggml_tensor *> & v_ctrl, const std::string fname, const std::string model_hint) {
struct gguf_context * ctx = gguf_init_empty();
const std::string arch = "controlvector";
gguf_set_val_str(ctx, "general.architecture", arch.c_str());
gguf_set_val_str(ctx, (arch + ".model_hint").c_str(), model_hint.c_str());
gguf_set_val_i32(ctx, (arch + ".layer_count").c_str(), v_ctrl.size());
for (size_t i = 0; i < v_ctrl.size(); ++i) {
gguf_add_tensor(ctx, v_ctrl[i]);
print_debug_tensor(v_ctrl[i]);
printf("Added tensor: %s\n", v_ctrl[i]->name);
}
printf("%s: writing file...\n", __func__);
gguf_write_to_file(ctx, fname.c_str(), false);
printf("%s: wrote file '%s'\n", __func__, fname.c_str());
gguf_free(ctx);
}
/**
 * Load the positive and negative prompt files.
 * Then pair them up line by line to form the training entries (the counts must match).
 */
static int prepare_entries(common_params & params, train_context & ctx_train) {
// load prompts
std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
if (positive_prompts.size() != negative_prompts.size()) {
fprintf(stderr, "number of positive and negative prompts must be equal\n");
return 1;
}
if (positive_prompts.empty()) {
fprintf(stderr, "must provide at least one prompt pair\n");
return 1;
}
ctx_train.positive_entries = positive_prompts;
ctx_train.negative_entries = negative_prompts;
retu
|
ast_based
|
<|fim_prefix|>ng */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/input/input_event.h"
#include "core/math/rect2.h"
#include "core/os/memory.h"
#include "core/variant/array.h"
#include "tests/test_macros.h"
namespace TestInputEvent {
TEST_CASE("[InputEvent] Signal is emitted when device is changed") {
Ref<InputEventKey> input_event;
input_event.instantiate();
SIGNAL_WATCH(*input_event, CoreStringName(changed));
Array empty_args = { {} };
input_event->set_device(1);
SIGNAL_CHECK("changed", empty_args);
CHECK(input_event->get_device() == 1);
SIGNAL_UNWATCH(*input_event, CoreStringName(changed));
}
TEST_CASE("[InputEvent] Test accumulate") {
Ref<InputEventMouseMotion> iemm1, iemm2;
Ref<InputEventKey> iek;
iemm1.instantiate(), iemm2.instantiate();
iek.instantiate();
<|fim_suffix|>
iemm2->set_button_mask(MouseButtonMask::LEFT);
CHECK(iemm1->accumulate(iemm2));
CHECK_FALSE(iemm1->accumulate(iek));
CHECK_FALSE(iemm2->accumulate(iek));
}
TEST_CASE("[InputEvent][SceneTree] Test methods that interact with the InputMap") {
const String mock_action = "mock_action";
Ref<InputEventJoypadMotion> iejm;
iejm.instantiate();
InputMap::get_singleton()->add_action(mock_action, 0.5);
InputMap::get_singleton()->action_add_event(mock_action, iejm);
CHECK(iejm->is_action_type());
CHECK(iejm->is_action(mock_action));
CHECK(iejm->is_action_released(mock_action));
CHECK(Math::is_equal_approx(iejm->get_action_strength(mock_action), 0.0f));
iejm->set_axis_value(0.8f);
// Since deadzone is 0.5, action_strength grows linearly from 0.5 to 1.0.
CHECK(Math::is_equal_approx(iejm->get_action_strength(mock_action), 0.6f));
CHECK(Math::is_equal_approx(iejm->get_action_raw_strength(mock_action), 0.8f));
CHECK(iejm->is_action_pressed(mock_action));
InputMap::get_singleton()->erase_action(mock_action);
}
TEST_CASE("[InputEvent] Test xformed_by") {
Ref<InputEventMouseMotion> iemm1;
iemm1.instantiate();
iemm1->set_position(Vector2(0.0f, 0.0f));
Transform2D transform;
transform = transform.translated(Vector2(2.0f, 3.0f));
Ref<InputEventMouseMotion> iemm2 = iemm1->xformed_by(transform);
CHECK(iemm2->get_position().is_equal_approx(Vector2(2.0f, 3.0f)));
}
} // namespace TestInputEvent
<|fim_middle|>iemm1->set_button_mask(MouseButtonMask::LEFT);
CHECK_FALSE(iemm1->accumulate(iemm2));
|
ng */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/input/input_event.h"
#include "core/math/rect2.h"
#include "core/os/memory.h"
#include "core/variant/array.h"
#include "tests/test_macros.h"
namespace TestInputEvent {
TEST_CASE("[InputEvent] Signal is emitted when device is changed") {
Ref<InputEventKey> input_event;
input_event.instantiate();
SIGNAL_WATCH(*input_event, CoreStringName(changed));
Array empty_args = { {} };
input_event->set_device(1);
SIGNAL_CHECK("changed", empty_args);
CHECK(input_event->get_device() == 1);
SIGNAL_UNWATCH(*input_event, CoreStringName(changed));
}
TEST_CASE("[InputEvent] Test accumulate") {
Ref<InputEventMouseMotion> iemm1, iemm2;
Ref<InputEventKey> iek;
iemm1.instantiate(), iemm2.instantiate();
iek.instantiate();
|
iemm1->set_button_mask(MouseButtonMask::LEFT);
CHECK_FALSE(iemm1->accumulate(iemm2));
|
iemm2->set_button_mask(MouseButtonMask::LEFT);
CHECK(iemm1->accumulate(iemm2));
CHECK_FALSE(iemm1->accumulate(iek));
CHECK_FALSE(iemm2->accumulate(iek));
}
TEST_CASE("[InputEvent][SceneTree] Test methods that interact with the InputMap") {
const String mock_action = "mock_action";
Ref<InputEventJoypadMotion> iejm;
iejm.instantiate();
InputMap::get_singleton()->add_action(mock_action, 0.5);
InputMap::get_singleton()->action_add_event(mock_action, iejm);
CHECK(iejm->is_action_type());
CHECK(iejm->is_action(mock_action));
CHECK(iejm->is_action_released(mock_action));
CHECK(Math::is_equal_approx(iejm->get_action_strength(mock_action), 0.0f));
iejm->set_axis_value(0.8f);
// Since deadzone is 0.5, action_strength grows linearly from 0.5 to 1.0.
CHECK(Math::is_equal_approx(iejm->get_action_strength(mock_action), 0.6f));
CHECK(Math::is_equal_approx(iejm->get_action_raw_strength(mock_action), 0.8f));
CHECK(iejm->is_action_pressed(mock_action));
InputMap::get_singleton()->erase_action(mock_action);
}
TEST_CASE("[InputEvent] Test xformed_by") {
Ref<InputEventMouseMotion> iemm1;
iemm1.instantiate();
iemm1->set_position(Vector2(0.0f, 0.0f));
Transform2D transform;
transform = transform.translated(Vector2(2.0f, 3.0f));
Ref<InputEventMouseMotion> iemm2 = iemm1->xformed_by(transform);
CHECK(iemm2->get_position().is_equal_approx(Vector2(2.0f, 3.0f)));
}
} // namespace TestInputEvent
|
ast_based
|
<|fim_prefix|>(smpl, &cur_p);
// auto token = cur_p.data[cur_p.selected].id;
// llama_sampler_accept(smpl, token);
// return token;
// Returns the sampled token
LLAMA_API llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx);
// TODO: extend in the future
//LLAMA_API void llama_decode_with_sampler(struct llama_context * ctx, struct llama_sampler * smpl, struct llama_batch batch, ...);
//
// Model split
//
/// @details Build a split GGUF final path for this chunk.
/// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf"
// Returns the split_path length.
LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count);
/// @details Extract the path prefix from the split_path if and only if the split_no and split_count match.
/// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0"
// Returns the split_prefix length.
LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count);
// Print system information
LLAMA_API const char * llama_print_system_info(void);
// Set callback for all future logging events.
// If this is not called, or NULL is supplied, everything is output on stderr.
LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data);
//
// Performance utils
//
// NOTE: Used by llama.cpp examples/tools, avoid using in third-party apps. Instead, do your own performance measurements.
//
struct llama_perf_context_data {
// ms == milliseconds
double t_start_ms; // absolute start time
double t_load_ms; // time needed for loading the model
dou<|fim_suffix|> int32_t n_p_eval; // number of prompt tokens
int32_t n_eval; // number of generated tokens
int32_t n_reused; // number of times a ggml compute graph had been reused
};
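 // Throughput can be derived from these fields, e.g. prompt processing speed = 1e3 * n_p_eval / t_p_eval_ms tokens/s and generation speed = 1e3 * n_eval / t_eval_ms tokens/s.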
struct llama_perf_sampler_data {
double t_sample_ms; // time needed for sampling in ms
int32_t n_sample; // number of sampled tokens
};
LLAMA_API struct llama_perf_context_data llama_perf_context (const struct llama_context * ctx);
LLAMA_API void llama_perf_context_print(const struct llama_context * ctx);
LLAMA_API void llama_perf_context_reset( struct llama_context * ctx);
// NOTE: the following work only with samplers constructed via llama_sampler_chain_init
LLAMA_API struct llama_perf_sampler_data llama_perf_sampler (const struct llama_sampler * chain);
LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain);
LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain);
// print a breakdown of per-device memory use via LLAMA_LOG:
LLAMA_API void llama_memory_breakdown_print(const struct llama_context * ctx);
//
// training
//
// function that returns whether or not a given tensor contains trainable parameters
typedef bool (*llama_opt_param_filter)(const struct ggml_tensor * tensor, void * userdata);
// always returns true
LLAMA_API bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userdata);
struct llama_opt_params {
uint32_t n_ctx_train; // assumed context size post training, use context size specified in llama_context if 0
llama_opt_param_filter param_filter; // callback for determining which tensors contain trainable parameters
void * param_filter_ud; // userdata for determining which tensors contain trainable parameters
ggml_opt_get_optimi<|fim_middle|>ble t_p_eval_ms; // time needed for processing the prompt
double t_eval_ms; // time needed for generating tokens
|
(smpl, &cur_p);
// auto token = cur_p.data[cur_p.selected].id;
// llama_sampler_accept(smpl, token);
// return token;
// Returns the sampled token
LLAMA_API llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx);
// TODO: extend in the future
//LLAMA_API void llama_decode_with_sampler(struct llama_context * ctx, struct llama_sampler * smpl, struct llama_batch batch, ...);
//
// Model split
//
/// @details Build a split GGUF final path for this chunk.
/// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf"
// Returns the split_path length.
LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count);
/// @details Extract the path prefix from the split_path if and only if the split_no and split_count match.
/// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0"
// Returns the split_prefix length.
LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count);
// Print system information
LLAMA_API const char * llama_print_system_info(void);
// Set callback for all future logging events.
// If this is not called, or NULL is supplied, everything is output on stderr.
LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data);
//
// Performance utils
//
// NOTE: Used by llama.cpp examples/tools, avoid using in third-party apps. Instead, do your own performance measurements.
//
struct llama_perf_context_data {
// ms == milliseconds
double t_start_ms; // absolute start time
double t_load_ms; // time needed for loading the model
dou
|
ble t_p_eval_ms; // time needed for processing the prompt
double t_eval_ms; // time needed for generating tokens
|
int32_t n_p_eval; // number of prompt tokens
int32_t n_eval; // number of generated tokens
int32_t n_reused; // number of times a ggml compute graph had been reused
};
struct llama_perf_sampler_data {
double t_sample_ms; // time needed for sampling in ms
int32_t n_sample; // number of sampled tokens
};
LLAMA_API struct llama_perf_context_data llama_perf_context (const struct llama_context * ctx);
LLAMA_API void llama_perf_context_print(const struct llama_context * ctx);
LLAMA_API void llama_perf_context_reset( struct llama_context * ctx);
// NOTE: the following work only with samplers constructed via llama_sampler_chain_init
LLAMA_API struct llama_perf_sampler_data llama_perf_sampler (const struct llama_sampler * chain);
LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain);
LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain);
// print a breakdown of per-device memory use via LLAMA_LOG:
LLAMA_API void llama_memory_breakdown_print(const struct llama_context * ctx);
//
// training
//
// function that returns whether or not a given tensor contains trainable parameters
typedef bool (*llama_opt_param_filter)(const struct ggml_tensor * tensor, void * userdata);
// always returns true
LLAMA_API bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userdata);
struct llama_opt_params {
uint32_t n_ctx_train; // assumed context size post training, use context size specified in llama_context if 0
llama_opt_param_filter param_filter; // callback for determining which tensors contain trainable parameters
void * param_filter_ud; // userdata for determining which tensors contain trainable parameters
ggml_opt_get_optimi
|
ast_based
|
<|fim_prefix|> int track_idx = E->key().track;
int key_idx = E->key().key;
float time = E->value().pos;
undo_redo->add_do_method(animation.ptr(), "track_remove_key_at_time", track_idx, time);
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", track_idx, time, animation->track_get_key_value(track_idx, key_idx), animation->track_get_key_transition(track_idx, key_idx));
undo_redo->add_undo_method(this, "_select_at_anim", animation, track_idx, time, i == 0);
i++;
}
i = 0;
for (RBMap<AnimationTrackEditor::SelectedKey, AnimationTrackEditor::KeyInfo>::Element *E = keys.back(); E; E = E->prev()) {
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->key().track, E->value().pos, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->add_do_method(this, "queue_redraw");
undo_redo->add_undo_method(this, "queue_redraw");
undo_redo->commit_action();
}
}
void AnimationBezierTrackEdit::paste_keys(real_t p_ofs, bool p_ofs_valid) {
if (editor->is_key_clipboard_active() && animation.is_valid() && (selected_track >= 0 && selected_track < animation->get_track_count())) {
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Animation Paste Keys"));
bool same_track = true;
bool all_compatible = true;
for (int i = 0; i < editor->key_clipboard.keys.size(); i++) {
const AnimationTrackEditor::KeyClipboard::Key key = editor->key_clipboard.keys[i];
if (key.track != 0) {
same_track = false;
break;
}
if (!editor->_is_track_compatible(selected_track, key.value.get_type(), key.track_type)) {
all_compatible = false;
break;
}
}
ERR_FAIL_COND_MSG(!all_compatible, "Paste failed: Not all animation keys were compatible with their target tracks");<|fim_suffix|>
List<Pair<int, float>> new_selection_values;
for (int i = 0; i < editor->key_clipboard.keys.size(); i++) {
const AnimationTrackEditor::KeyClipboard::Key key = editor->key_clipboard.keys[i];
float insert_pos = p_ofs_valid ? p_ofs : timeline->get_play_position();
if (p_ofs_valid) {
if (editor->snap_keys->is_pressed() && editor->step->get_value() != 0) {
insert_pos = editor->snap_time(insert_pos);
}
}
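 // Clipboard key times appear to be stored relative to the copied selection, so offsetting them by the (optionally snapped) insert position places them at the paste target.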
float dst_time = key.time + insert_pos;
int existing_idx = animation->track_find_key(selected_track, dst_time, Animation::FIND_MODE_APPROX);
Variant value = key.value;
if (key.track_type != Animation::TYPE_BEZIER) {
value = animation->make_default_bezier_key(key.value);
}
undo_redo->add_do_method(animation.ptr(), "track_insert_key", selected_track, dst_time, value, key.transition);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", selected_track, dst_time);
Pair<int, float> p;
p.first = selected_track;
p.second = dst_time;
new_selection_values.push_back(p);
if (existing_idx != -1) {
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", selected_track, dst_time, animation->track_get_key_value(selected_track, existing_idx), animation->track_get_key_transition(selected_track, existing_idx));
}
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect pasted.
int i = 0;
for (const Pair<int, float> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, animation->track_get_key_time(E->get().first, E->get().second), i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();<|fim_middle|> if (!same_track) {
WARN_PRINT("Pasted animation keys from multiple tracks into single Bezier track");
}
|
int track_idx = E->key().track;
int key_idx = E->key().key;
float time = E->value().pos;
undo_redo->add_do_method(animation.ptr(), "track_remove_key_at_time", track_idx, time);
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", track_idx, time, animation->track_get_key_value(track_idx, key_idx), animation->track_get_key_transition(track_idx, key_idx));
undo_redo->add_undo_method(this, "_select_at_anim", animation, track_idx, time, i == 0);
i++;
}
i = 0;
for (RBMap<AnimationTrackEditor::SelectedKey, AnimationTrackEditor::KeyInfo>::Element *E = keys.back(); E; E = E->prev()) {
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->key().track, E->value().pos, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->add_do_method(this, "queue_redraw");
undo_redo->add_undo_method(this, "queue_redraw");
undo_redo->commit_action();
}
}
void AnimationBezierTrackEdit::paste_keys(real_t p_ofs, bool p_ofs_valid) {
if (editor->is_key_clipboard_active() && animation.is_valid() && (selected_track >= 0 && selected_track < animation->get_track_count())) {
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Animation Paste Keys"));
bool same_track = true;
bool all_compatible = true;
for (int i = 0; i < editor->key_clipboard.keys.size(); i++) {
const AnimationTrackEditor::KeyClipboard::Key key = editor->key_clipboard.keys[i];
if (key.track != 0) {
same_track = false;
break;
}
if (!editor->_is_track_compatible(selected_track, key.value.get_type(), key.track_type)) {
all_compatible = false;
break;
}
}
ERR_FAIL_COND_MSG(!all_compatible, "Paste failed: Not all animation keys were compatible with their target tracks");
|
if (!same_track) {
WARN_PRINT("Pasted animation keys from multiple tracks into single Bezier track");
}
|
List<Pair<int, float>> new_selection_values;
for (int i = 0; i < editor->key_clipboard.keys.size(); i++) {
const AnimationTrackEditor::KeyClipboard::Key key = editor->key_clipboard.keys[i];
float insert_pos = p_ofs_valid ? p_ofs : timeline->get_play_position();
if (p_ofs_valid) {
if (editor->snap_keys->is_pressed() && editor->step->get_value() != 0) {
insert_pos = editor->snap_time(insert_pos);
}
}
float dst_time = key.time + insert_pos;
int existing_idx = animation->track_find_key(selected_track, dst_time, Animation::FIND_MODE_APPROX);
Variant value = key.value;
if (key.track_type != Animation::TYPE_BEZIER) {
value = animation->make_default_bezier_key(key.value);
}
undo_redo->add_do_method(animation.ptr(), "track_insert_key", selected_track, dst_time, value, key.transition);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", selected_track, dst_time);
Pair<int, float> p;
p.first = selected_track;
p.second = dst_time;
new_selection_values.push_back(p);
if (existing_idx != -1) {
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", selected_track, dst_time, animation->track_get_key_value(selected_track, existing_idx), animation->track_get_key_transition(selected_track, existing_idx));
}
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect pasted.
int i = 0;
for (const Pair<int, float> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, animation->track_get_key_time(E->get().first, E->get().second), i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
|
random
|
<|fim_prefix|> // use FILE * so we don't have to re-open the file to mmap
FILE * fp;
size_t size;
my_llama_file(const char * fname, const char * mode) {
fp = std::fopen(fname, mode);
if (fp == NULL) {
size = 0;
} else {
seek(0, SEEK_END);
size = tell();
seek(0, SEEK_SET);
}
}
size_t tell() const {
#ifdef _WIN32
__int64 ret = _ftelli64(fp);
#else
long ret = std::ftell(fp);
#endif
GGML_ASSERT(ret != -1); // this really shouldn't fail
return (size_t) ret;
}
void seek(size_t offset, int whence) {
#ifdef _WIN32
int ret = _fseeki64(fp, (__int64) offset, whence);
#else
int ret = std::fseek(fp, (long) offset, whence);
#endif
GGML_ASSERT(ret == 0); // same
}
void read_raw(void * ptr, size_t size) {
if (size == 0) {
return;
}
errno = 0;
std::size_t ret = std::fread(ptr, size, 1, fp);
if (ferror(fp)) {
die_fmt("fread failed: %s", strerror(errno));
}
if (ret != 1) {
die("unexpectedly reached end of file");
}
}
std::uint32_t read_u32() {
std::uint32_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::float_t read_f32() {
std::float_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::string read_string(std::uint32_t len) {
std::vector<char> chars(len);
read_raw(chars.data(), len);
return std::string(chars.data(), len);
}
~my_llama_file() {
if (fp) {
std::fclose(fp);
}
}
};
static bool is_ggml_file(const char * filename) {
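 // GGUF files begin with the 4-byte magic "GGUF", so reading the first four bytes is enough to identify them.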
my_llama_file file(filename, "rb");
if (file.size < 4) {
return false;
}
std::string magic = file.read_string(4);
return magic == GGUF_MAGIC;
}
static std::string llama_escape_whitespaces(const std::string & text) {<|fim_suffix|> LOG_INF("%s: Loading vocabulary from gguf file %s\n", __func__, filename);
struct ggml_context * ctx_data = NULL;
struct gguf_init_params params = {
/*.no_alloc = */ false,
/*.ctx = */ &ctx_data,
};
struct gguf_context * ctx = gguf_init_from_file(filename, params);
GGML_ASSERT(ctx != NULL);
const int model_idx = gguf_find_key(ctx, KV_TOKENIZER_MODEL);
GGML_ASSERT(model_idx >= 0);
std::string tokenizer_name = gguf_get_val_str(ctx, model_idx);
GGML_ASSERT(tokenizer_name == TOKENIZER_NAME);
const int token_idx = gguf_find_key(ctx, KV_TOKENIZER_LIST);
GGML_ASSERT(token_idx >= 0);
const int score_idx = gguf_find_key(ctx, KV_TOKENIZER_SCORES);
GGML_ASSERT(score_idx >= 0);
const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
const int toktype_idx = gguf_find_key(ctx, KV_TOKENIZER_TOKEN_TYPE);
GGML_ASSERT(toktype_idx >= 0);
const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
if (n_vocab != static_cast<uint32_t>(config->vocab_size)) {
die_fmt("vocab size mismatch: (gguf) %u != (llama2c) %d", n_vocab, config->vocab_size);
}
vocab->id_to_token.resize(n_vocab);
for (uint32_t i = 0; i < n_vocab; i++) {
std::string word = gguf_get_arr_str(ctx, token_idx, i);
vocab->token_to_id[word] = i;
auto & token_data = vocab->id_to_token[i];
token_data.text = std::move(word);
token_data.score = scores[i];
token_data.type = (llama_token_type) toktypes[i];
}
ggml_free(ctx_data);
gguf_free(ctx);
} else {
// assume llama2.c vocabulary
LOG_INF("%s: Assuming llama2.c vocabulary since %s is not a gguf file\n", __func__, filename);<|fim_middle|> std::ostringstream out;
for (char c : text) {
if (c == ' ') out << "\xe2\x96\x81";
else out << c;
}
return out.str();
}
static void load_vocab(const char * filename, const Config * config, struct my_llama_vocab * vocab) {
if (is_ggml_file(filename)) {
|
// use FILE * so we don't have to re-open the file to mmap
FILE * fp;
size_t size;
my_llama_file(const char * fname, const char * mode) {
fp = std::fopen(fname, mode);
if (fp == NULL) {
size = 0;
} else {
seek(0, SEEK_END);
size = tell();
seek(0, SEEK_SET);
}
}
size_t tell() const {
#ifdef _WIN32
__int64 ret = _ftelli64(fp);
#else
long ret = std::ftell(fp);
#endif
GGML_ASSERT(ret != -1); // this really shouldn't fail
return (size_t) ret;
}
void seek(size_t offset, int whence) {
#ifdef _WIN32
int ret = _fseeki64(fp, (__int64) offset, whence);
#else
int ret = std::fseek(fp, (long) offset, whence);
#endif
GGML_ASSERT(ret == 0); // same
}
void read_raw(void * ptr, size_t size) {
if (size == 0) {
return;
}
errno = 0;
std::size_t ret = std::fread(ptr, size, 1, fp);
if (ferror(fp)) {
die_fmt("fread failed: %s", strerror(errno));
}
if (ret != 1) {
die("unexpectedly reached end of file");
}
}
std::uint32_t read_u32() {
std::uint32_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::float_t read_f32() {
std::float_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::string read_string(std::uint32_t len) {
std::vector<char> chars(len);
read_raw(chars.data(), len);
return std::string(chars.data(), len);
}
~my_llama_file() {
if (fp) {
std::fclose(fp);
}
}
};
static bool is_ggml_file(const char * filename) {
my_llama_file file(filename, "rb");
if (file.size < 4) {
return false;
}
std::string magic = file.read_string(4);
return magic == GGUF_MAGIC;
}
static std::string llama_escape_whitespaces(const std::string & text) {
|
std::ostringstream out;
for (char c : text) {
if (c == ' ') out << "\xe2\x96\x81";
else out << c;
}
return out.str();
}
static void load_vocab(const char * filename, const Config * config, struct my_llama_vocab * vocab) {
if (is_ggml_file(filename)) {
|
LOG_INF("%s: Loading vocabulary from gguf file %s\n", __func__, filename);
struct ggml_context * ctx_data = NULL;
struct gguf_init_params params = {
/*.no_alloc = */ false,
/*.ctx = */ &ctx_data,
};
struct gguf_context * ctx = gguf_init_from_file(filename, params);
GGML_ASSERT(ctx != NULL);
const int model_idx = gguf_find_key(ctx, KV_TOKENIZER_MODEL);
GGML_ASSERT(model_idx >= 0);
std::string tokenizer_name = gguf_get_val_str(ctx, model_idx);
GGML_ASSERT(tokenizer_name == TOKENIZER_NAME);
const int token_idx = gguf_find_key(ctx, KV_TOKENIZER_LIST);
GGML_ASSERT(token_idx >= 0);
const int score_idx = gguf_find_key(ctx, KV_TOKENIZER_SCORES);
GGML_ASSERT(score_idx >= 0);
const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
const int toktype_idx = gguf_find_key(ctx, KV_TOKENIZER_TOKEN_TYPE);
GGML_ASSERT(toktype_idx >= 0);
const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
if (n_vocab != static_cast<uint32_t>(config->vocab_size)) {
die_fmt("vocab size mismatch: (gguf) %u != (llama2c) %d", n_vocab, config->vocab_size);
}
vocab->id_to_token.resize(n_vocab);
for (uint32_t i = 0; i < n_vocab; i++) {
std::string word = gguf_get_arr_str(ctx, token_idx, i);
vocab->token_to_id[word] = i;
auto & token_data = vocab->id_to_token[i];
token_data.text = std::move(word);
token_data.score = scores[i];
token_data.type = (llama_token_type) toktypes[i];
}
ggml_free(ctx_data);
gguf_free(ctx);
} else {
// assume llama2.c vocabulary
LOG_INF("%s: Assuming llama2.c vocabulary since %s is not a gguf file\n", __func__, filename);
|
random
|
<|fim_prefix|> or after Recognize.
*/
Boxa *TessBaseAPI::GetConnectedComponents(Pixa **pixa) {
return GetComponentImages(RIL_SYMBOL, true, pixa, nullptr);
}
/**
* Get the given level kind of components (block, textline, word etc.) as a
* leptonica-style Boxa, Pixa pair, in reading order.
* Can be called before or after Recognize.
* If blockids is not nullptr, the block-id of each component is also returned
* as an array of one element per component. delete [] after use.
* If text_only is true, then only text components are returned.
*/
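// A minimal usage sketch (the variable names and the chosen iterator level are
// illustrative assumptions, not taken from this file):
//   Pixa *pixa = nullptr;
//   int *blockids = nullptr;
//   Boxa *boxes = api.GetComponentImages(tesseract::RIL_TEXTLINE, true, &pixa, &blockids);
//   // One entry per text line, in reading order; free blockids with delete [] and
//   // release the Leptonica objects with boxaDestroy()/pixaDestroy().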
Boxa *TessBaseAPI::GetComponentImages(PageIteratorLevel level, bool text_only, bool raw_image,
const int raw_padding, Pixa **pixa, int **blockids,
int **paraids) {
/*non-const*/ std::unique_ptr</*non-const*/ PageIterator> page_it(GetIterator());
if (page_it == nullptr) {
page_it.reset(AnalyseLayout());
}
if (page_it == nullptr) {
return nullptr; // Failed.
}
// Count the components to get a size for the arrays.
int component_count = 0;
int left, top, right, bottom;
if (raw_image) {
// Get bounding box in original raw image with padding.
do {
if (page_it->BoundingBox(level, raw_padding, &left, &top, &right, &bottom) &&
(!text_only || PTIsTextType(page_it->BlockType()))) {
++component_count;
}
} while (page_it->Next(level));
} else {
    // Get bounding box from binarized image. Note that this could be
// differently scaled from the original image.
do {
if (page_it->BoundingBoxInternal(level, &left, &top, &right, &bottom) &&
(!text_only || PTIsTextType(page_it->BlockType()))) {
++component_count;
}
} while (page_it->Next(level));
}
Boxa *boxa = boxaCreate(component_count);
if (pixa != nullptr) {
*pixa = pixaCreate(component_count);
}
if (blockids != nullptr) {
*blockids = new int[component_count];
}
if (paraids != nullptr) {
<|fim_suffix|>
}
int blockid = 0;
int paraid = 0;
int component_index = 0;
page_it->Begin();
do {
bool got_bounding_box;
if (raw_image) {
got_bounding_box = page_it->BoundingBox(level, raw_padding, &left, &top, &right, &bottom);
} else {
got_bounding_box = page_it->BoundingBoxInternal(level, &left, &top, &right, &bottom);
}
if (got_bounding_box && (!text_only || PTIsTextType(page_it->BlockType()))) {
Box *lbox = boxCreate(left, top, right - left, bottom - top);
boxaAddBox(boxa, lbox, L_INSERT);
if (pixa != nullptr) {
Pix *pix = nullptr;
if (raw_image) {
pix = page_it->GetImage(level, raw_padding, GetInputImage(), &left, &top);
} else {
pix = page_it->GetBinaryImage(level);
}
pixaAddPix(*pixa, pix, L_INSERT);
pixaAddBox(*pixa, lbox, L_CLONE);
}
if (paraids != nullptr) {
(*paraids)[component_index] = paraid;
if (page_it->IsAtFinalElement(RIL_PARA, level)) {
++paraid;
}
}
if (blockids != nullptr) {
(*blockids)[component_index] = blockid;
if (page_it->IsAtFinalElement(RIL_BLOCK, level)) {
++blockid;
paraid = 0;
}
}
++component_index;
}
} while (page_it->Next(level));
return boxa;
}
int TessBaseAPI::GetThresholdedImageScaleFactor() const {
if (thresholder_ == nullptr) {
return 0;
}
return thresholder_->GetScaleFactor();
}
/**
* Runs page layout analysis in the mode set by SetPageSegMode.
* May optionally be called prior to Recognize to get access to just
* the page layout results. Returns an iterator to the results.
* If merge_similar_words is true, words are combined where suitable for use
* with a line recognizer. Use if you want to use AnalyseLayout to find the
* textlines, and then want to process textline fragments with an external
* line recognizer.
* Returns nullptr on error or an empty page.
* The returned i<|fim_middle|>*paraids = new int[component_count];
|
or after Recognize.
*/
Boxa *TessBaseAPI::GetConnectedComponents(Pixa **pixa) {
return GetComponentImages(RIL_SYMBOL, true, pixa, nullptr);
}
/**
* Get the given level kind of components (block, textline, word etc.) as a
* leptonica-style Boxa, Pixa pair, in reading order.
* Can be called before or after Recognize.
* If blockids is not nullptr, the block-id of each component is also returned
* as an array of one element per component. delete [] after use.
* If text_only is true, then only text components are returned.
*/
Boxa *TessBaseAPI::GetComponentImages(PageIteratorLevel level, bool text_only, bool raw_image,
const int raw_padding, Pixa **pixa, int **blockids,
int **paraids) {
/*non-const*/ std::unique_ptr</*non-const*/ PageIterator> page_it(GetIterator());
if (page_it == nullptr) {
page_it.reset(AnalyseLayout());
}
if (page_it == nullptr) {
return nullptr; // Failed.
}
// Count the components to get a size for the arrays.
int component_count = 0;
int left, top, right, bottom;
if (raw_image) {
// Get bounding box in original raw image with padding.
do {
if (page_it->BoundingBox(level, raw_padding, &left, &top, &right, &bottom) &&
(!text_only || PTIsTextType(page_it->BlockType()))) {
++component_count;
}
} while (page_it->Next(level));
} else {
    // Get bounding box from binarized image. Note that this could be
// differently scaled from the original image.
do {
if (page_it->BoundingBoxInternal(level, &left, &top, &right, &bottom) &&
(!text_only || PTIsTextType(page_it->BlockType()))) {
++component_count;
}
} while (page_it->Next(level));
}
Boxa *boxa = boxaCreate(component_count);
if (pixa != nullptr) {
*pixa = pixaCreate(component_count);
}
if (blockids != nullptr) {
*blockids = new int[component_count];
}
if (paraids != nullptr) {
|
*paraids = new int[component_count];
|
}
int blockid = 0;
int paraid = 0;
int component_index = 0;
page_it->Begin();
do {
bool got_bounding_box;
if (raw_image) {
got_bounding_box = page_it->BoundingBox(level, raw_padding, &left, &top, &right, &bottom);
} else {
got_bounding_box = page_it->BoundingBoxInternal(level, &left, &top, &right, &bottom);
}
if (got_bounding_box && (!text_only || PTIsTextType(page_it->BlockType()))) {
Box *lbox = boxCreate(left, top, right - left, bottom - top);
boxaAddBox(boxa, lbox, L_INSERT);
if (pixa != nullptr) {
Pix *pix = nullptr;
if (raw_image) {
pix = page_it->GetImage(level, raw_padding, GetInputImage(), &left, &top);
} else {
pix = page_it->GetBinaryImage(level);
}
pixaAddPix(*pixa, pix, L_INSERT);
pixaAddBox(*pixa, lbox, L_CLONE);
}
if (paraids != nullptr) {
(*paraids)[component_index] = paraid;
if (page_it->IsAtFinalElement(RIL_PARA, level)) {
++paraid;
}
}
if (blockids != nullptr) {
(*blockids)[component_index] = blockid;
if (page_it->IsAtFinalElement(RIL_BLOCK, level)) {
++blockid;
paraid = 0;
}
}
++component_index;
}
} while (page_it->Next(level));
return boxa;
}
int TessBaseAPI::GetThresholdedImageScaleFactor() const {
if (thresholder_ == nullptr) {
return 0;
}
return thresholder_->GetScaleFactor();
}
/**
* Runs page layout analysis in the mode set by SetPageSegMode.
* May optionally be called prior to Recognize to get access to just
* the page layout results. Returns an iterator to the results.
* If merge_similar_words is true, words are combined where suitable for use
* with a line recognizer. Use if you want to use AnalyseLayout to find the
* textlines, and then want to process textline fragments with an external
* line recognizer.
* Returns nullptr on error or an empty page.
* The returned i
|
ast_based
|
<|fim_prefix|>/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "animated_sprite_2d.h"
#include "scene/main/viewport.h"
#ifdef TOOLS_ENABLED
Dictionary AnimatedSprite2D::_edit_get_state() const {
Dictionary state = Node2D::_edit_get_state();
state["offset"] = offset;
return state;
}
void AnimatedSprite2D::_edit_set_state(const Dictionary &p_state) {
Node2D::_edit_set_state(p_state);
set_offset(p_state["offset"]);
}
void AnimatedSprite2D::_edit_set_pivot(const Point2 &p_pivot) {
set_offset(get_offset() - p_pivot);
set_position(get_transform().xform(p_pivot));
}
Point2 AnimatedSprite2D::_edit_get_pivot() const {
return Vector2();
}
bool AnimatedSprite2D::_edit_use_pivot() const {
return true;
}
#endif // TOOLS_ENABLED
#ifdef DEBUG_ENABLED
Rect2 AnimatedSprite2D::_edit_get_rect() const {
return _get_rect();
}
bool AnimatedSprite2D::_edit_use_rect() const {
if (frames.is_null() || !frames->has_animation(animation)) {
return false;
}
if (frame < 0 || frame >= frames->get_frame_count(animation)) {
return false;
}
Ref<Texture2D> t;
if (animation) {
t = frames->get_frame_texture(animation, frame);
}
return t.is_valid();
}
#endif // DEBUG_ENABLED
Rect2 AnimatedSprite2D::get_anchorable_rect() const {
return _get_rect();
}
Rect2 AnimatedSprite2D::_get_rect() const {
if (frames.is_null() || !frames->has_animation(animation)) {
return Rect2();
}
if (frame < 0 || frame >= frames->get_frame_count(animation)) {
return Rect2();
}
Ref<Texture2D> t;
if (animation) {
t = frames->get_frame_texture(animation, frame);
}
if (t.is_null()) {
return Rect2();
}<|fim_suffix|> }
return Rect2(ofs, s);
}
void AnimatedSprite2D::_validate_property(PropertyInfo &p_property) const {
if (frames.is_null()) {
return;
}
if (!Engine::get_singleton()->is_editor_hint()) {
if (p_property.name == "frame" && playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
}
return;
}
if (p_property.name == "animation") {
List<StringName> names;
frames->get_animation_list(&names);
names.sort_custom<StringName::AlphCompare>();
bool current_found = false;
bool is_first_element = true;
for (const StringName &E : names) {
if (!is_first_element) {
p_property.hint_string += ",";
} else {
is_first_element = false;
}
p_property.hint_string += String(E);
if (animation == E) {
current_found = true;
}
}
if (!current_found) {
if (p_property.hint_string.is_empty()) {
p_property.hint_string = String(animation);
} else {
p_property.hint_string = String(animation) + "," + p_property.hint_string;
}
}
return;
}
if (p_property.name == "frame") {
if (playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
return;
}
p_property.hint = PROPERTY_HINT_RANGE;
if (frames->has_animation(animation) && frames->get_frame_count(animation) > 0) {
p_property.hint_string = "0," + itos(frames->get_frame_count(animation) - 1) + ",1";
} else {
    // Avoid an error: `hint_string` is required for `PROPERTY_HINT_RANGE`.
p_property.hint_string = "0,0,1";
}
p_property.usage |= PROPERTY_USAGE_KEYING_INCREMENTS;
}
}
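// For a SpriteFrames resource with animations named "idle", "run" and "walk"
// (illustrative names), the loop above yields the enum hint "idle,run,walk";
// a 6-frame animation yields the range hint "0,5,1".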
void AnimatedSprite2D::_notification(int p_what) {
switch (p_what) {
case NOTIFICATION_ACCESSIBILITY_UPDATE: {
RID ae = get_accessibility_element();
ERR_FAIL_COND(ae.is_null());
Rect2 dst_rect = _get_rect();
DisplayServer::get_singleton()->accessibility_update_set_role(ae, DisplayServer::AccessibilityRole::ROLE_IMAGE);
DisplayServer::get_singleton()->accessibility_update_set_transform(ae, get_transform());<|fim_middle|> Size2 s = t->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (s == Size2(0, 0)) {
s = Size2(1, 1);
|
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "animated_sprite_2d.h"
#include "scene/main/viewport.h"
#ifdef TOOLS_ENABLED
Dictionary AnimatedSprite2D::_edit_get_state() const {
Dictionary state = Node2D::_edit_get_state();
state["offset"] = offset;
return state;
}
void AnimatedSprite2D::_edit_set_state(const Dictionary &p_state) {
Node2D::_edit_set_state(p_state);
set_offset(p_state["offset"]);
}
void AnimatedSprite2D::_edit_set_pivot(const Point2 &p_pivot) {
set_offset(get_offset() - p_pivot);
set_position(get_transform().xform(p_pivot));
}
Point2 AnimatedSprite2D::_edit_get_pivot() const {
return Vector2();
}
bool AnimatedSprite2D::_edit_use_pivot() const {
return true;
}
#endif // TOOLS_ENABLED
#ifdef DEBUG_ENABLED
Rect2 AnimatedSprite2D::_edit_get_rect() const {
return _get_rect();
}
bool AnimatedSprite2D::_edit_use_rect() const {
if (frames.is_null() || !frames->has_animation(animation)) {
return false;
}
if (frame < 0 || frame >= frames->get_frame_count(animation)) {
return false;
}
Ref<Texture2D> t;
if (animation) {
t = frames->get_frame_texture(animation, frame);
}
return t.is_valid();
}
#endif // DEBUG_ENABLED
Rect2 AnimatedSprite2D::get_anchorable_rect() const {
return _get_rect();
}
Rect2 AnimatedSprite2D::_get_rect() const {
if (frames.is_null() || !frames->has_animation(animation)) {
return Rect2();
}
if (frame < 0 || frame >= frames->get_frame_count(animation)) {
return Rect2();
}
Ref<Texture2D> t;
if (animation) {
t = frames->get_frame_texture(animation, frame);
}
if (t.is_null()) {
return Rect2();
}
|
Size2 s = t->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (s == Size2(0, 0)) {
s = Size2(1, 1);
|
}
return Rect2(ofs, s);
}
void AnimatedSprite2D::_validate_property(PropertyInfo &p_property) const {
if (frames.is_null()) {
return;
}
if (!Engine::get_singleton()->is_editor_hint()) {
if (p_property.name == "frame" && playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
}
return;
}
if (p_property.name == "animation") {
List<StringName> names;
frames->get_animation_list(&names);
names.sort_custom<StringName::AlphCompare>();
bool current_found = false;
bool is_first_element = true;
for (const StringName &E : names) {
if (!is_first_element) {
p_property.hint_string += ",";
} else {
is_first_element = false;
}
p_property.hint_string += String(E);
if (animation == E) {
current_found = true;
}
}
if (!current_found) {
if (p_property.hint_string.is_empty()) {
p_property.hint_string = String(animation);
} else {
p_property.hint_string = String(animation) + "," + p_property.hint_string;
}
}
return;
}
if (p_property.name == "frame") {
if (playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
return;
}
p_property.hint = PROPERTY_HINT_RANGE;
if (frames->has_animation(animation) && frames->get_frame_count(animation) > 0) {
p_property.hint_string = "0," + itos(frames->get_frame_count(animation) - 1) + ",1";
} else {
    // Avoid an error: `hint_string` is required for `PROPERTY_HINT_RANGE`.
p_property.hint_string = "0,0,1";
}
p_property.usage |= PROPERTY_USAGE_KEYING_INCREMENTS;
}
}
void AnimatedSprite2D::_notification(int p_what) {
switch (p_what) {
case NOTIFICATION_ACCESSIBILITY_UPDATE: {
RID ae = get_accessibility_element();
ERR_FAIL_COND(ae.is_null());
Rect2 dst_rect = _get_rect();
DisplayServer::get_singleton()->accessibility_update_set_role(ae, DisplayServer::AccessibilityRole::ROLE_IMAGE);
DisplayServer::get_singleton()->accessibility_update_set_transform(ae, get_transform());
|
random
|
<|fim_prefix|>/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "animation_bezier_editor.h"
#include "editor/animation/animation_player_editor_plugin.h"
#include "editor/editor_node.h"
#include "editor/editor_string_names.h"
#include "editor/editor_undo_redo_manager.h"
#include "editor/gui/editor_spin_slider.h"
#include "editor/settings/editor_settings.h"
#include "editor/themes/editor_scale.h"
#include "scene/gui/option_button.h"
#include "scene/gui/view_panner.h"
#include "scene/resources/text_line.h"
#include <climits>
<|fim_suffix|> h = (h - timeline_v_scroll) / timeline_v_zoom;
h = (get_size().height / 2.0) - h;
return h;
}
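// Converts a key value from track units to pixels: apply the vertical scroll and
// zoom, then flip around half the widget height so larger values are drawn higher.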
void AnimationBezierTrackEdit::_draw_track(int p_track, const Color &p_color) {
float scale = timeline->get_zoom_scale();
int limit = timeline->get_name_limit();
int right_limit = get_size().width;
// Selection may have altered the order of keys.
RBMap<real_t, int> key_order;
for (int i = 0; i < animation->track_get_key_count(p_track); i++) {
real_t ofs = animation->track_get_key_time(p_track, i);
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
ofs += moving_selection_offset.x;
} else if (scaling_selection) {
ofs += -scaling_selection_offset.x + (ofs - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
}
}
key_order[ofs] = i;
}
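// key_order sorts keys by their currently displayed time (including any in-progress
// move or scale offset), so the curve segments below are drawn between visually
// adjacent keys rather than by original key index.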
for (RBMap<real_t, int>::Element *E = key_order.front(); E; E = E->next()) {
int i = E->get();
if (!E->next()) {
break;
}
int i_n = E->next()->get();
float offset = animation->track_get_key_time(p_track, i);
float height = animation->bezier_track_get_key_value(p_track, i);
Vector2 out_handle = animation->bezier_track_get_key_out_handle(p_track, i);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i) {
out_handle = moving_handle_right;
}
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
offset += moving_selection_offset.x;
height += moving_selection_offset.y;
} else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
height += -scaling_selection_offset.y + (height - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
float offset_n = animation->track_get_key_time(p_track, i_n);
float height_n = animation->bezier_track_get_key_value(p_track, i_n);
Vector2 in_handle = animation->bezier_track_get_key_in_handle(p_track, i_n);<|fim_middle|>float AnimationBezierTrackEdit::_bezier_h_to_pixel(float p_h) {
float h = p_h;
|
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "animation_bezier_editor.h"
#include "editor/animation/animation_player_editor_plugin.h"
#include "editor/editor_node.h"
#include "editor/editor_string_names.h"
#include "editor/editor_undo_redo_manager.h"
#include "editor/gui/editor_spin_slider.h"
#include "editor/settings/editor_settings.h"
#include "editor/themes/editor_scale.h"
#include "scene/gui/option_button.h"
#include "scene/gui/view_panner.h"
#include "scene/resources/text_line.h"
#include <climits>
|
float AnimationBezierTrackEdit::_bezier_h_to_pixel(float p_h) {
float h = p_h;
|
h = (h - timeline_v_scroll) / timeline_v_zoom;
h = (get_size().height / 2.0) - h;
return h;
}
void AnimationBezierTrackEdit::_draw_track(int p_track, const Color &p_color) {
float scale = timeline->get_zoom_scale();
int limit = timeline->get_name_limit();
int right_limit = get_size().width;
// Selection may have altered the order of keys.
RBMap<real_t, int> key_order;
for (int i = 0; i < animation->track_get_key_count(p_track); i++) {
real_t ofs = animation->track_get_key_time(p_track, i);
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
ofs += moving_selection_offset.x;
} else if (scaling_selection) {
ofs += -scaling_selection_offset.x + (ofs - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
}
}
key_order[ofs] = i;
}
for (RBMap<real_t, int>::Element *E = key_order.front(); E; E = E->next()) {
int i = E->get();
if (!E->next()) {
break;
}
int i_n = E->next()->get();
float offset = animation->track_get_key_time(p_track, i);
float height = animation->bezier_track_get_key_value(p_track, i);
Vector2 out_handle = animation->bezier_track_get_key_out_handle(p_track, i);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i) {
out_handle = moving_handle_right;
}
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
offset += moving_selection_offset.x;
height += moving_selection_offset.y;
} else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
height += -scaling_selection_offset.y + (height - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
float offset_n = animation->track_get_key_time(p_track, i_n);
float height_n = animation->bezier_track_get_key_value(p_track, i_n);
Vector2 in_handle = animation->bezier_track_get_key_in_handle(p_track, i_n);
|
random
|
<|fim_prefix|> tensors.push_back(tensor);
}
// allocate tensors / buffers and zero
bufs.reserve(ctx_map.size());
for (auto it : ctx_map) {
ggml_backend_buffer_type_t buft = it.first;
ggml_context * ctx = it.second;
ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
if (!buf) {
LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
return false;
}
ggml_backend_buffer_clear(buf, 0);
bufs.emplace_back(buf);
}
return true;
}
bool llama_adapter_cvec::apply(
const llama_model & model,
const float * data,
size_t len,
int32_t n_embd,
int32_t il_start,
int32_t il_end) {
const auto & hparams = model.hparams;
if (data == nullptr) {
// disable the current control vector (but leave allocated for later)
layer_start = -1;
layer_end = -1;
return true;
}
if (n_embd != (int) hparams.n_embd) {
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
return false;
}
if (tensors.empty()) {
if (!init(model)) {
return false;
}
}
layer_start = il_start;
layer_end = il_end;
for (size_t il = 1; il < hparams.n_layer; il++) {
assert(tensors[il] != nullptr);
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
if (off + n_embd <= len) {
ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
}
}
return true;
}
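// The control-vector buffer is laid out as (n_layer - 1) contiguous rows of n_embd
// floats; layer il reads row (il - 1), and layer 0 never has a row of its own.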
// lora
llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) {
const std::string name(w->name);
const auto pos = ab_map.find(name);
if (pos != ab_map.end()) {
return &pos->second;
}<|fim_suffix|>static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
ggml_context * ctx_init;
gguf_init_params meta_gguf_params = {
/* .no_alloc = */ true,
/* .ctx = */ &ctx_init,
};
gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
if (!ctx_gguf) {
throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
}
ggml_context_ptr ctx { ctx_init };
// check metadata
{
const gguf_context * gguf_ctx = ctx_gguf.get();
LLAMA_LOG_INFO("%s: Dumping metadata keys/values.\n", __func__);
// get metadata as string
for (int i = 0; i < gguf_get_n_kv(gguf_ctx); i++) {
gguf_type type = gguf_get_kv_type(gguf_ctx, i);
const std::string type_name =
type == GGUF_TYPE_ARRAY
? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(gguf_ctx, i)), gguf_get_arr_n(gguf_ctx, i))
: gguf_type_name(type);
const char * name = gguf_get_key(gguf_ctx, i);
const std::string value = gguf_kv_to_str(gguf_ctx, i);
if (type != GGUF_TYPE_ARRAY) {
adapter.gguf_kv.emplace(name, value);
}
const size_t MAX_VALUE_LEN = 40;
std::string print_value = value.size() > MAX_VALUE_LEN ? format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str()) : value;
replace_all(print_value, "\n", "\\n");
LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), print_value.c_str());
}
auto get_kv_str = [&](const std::string & key) -> std::string {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? "" : std::string(gguf_get_val_str(gguf_ctx, id));<|fim_middle|>
return nullptr;
}
|
tensors.push_back(tensor);
}
// allocate tensors / buffers and zero
bufs.reserve(ctx_map.size());
for (auto it : ctx_map) {
ggml_backend_buffer_type_t buft = it.first;
ggml_context * ctx = it.second;
ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
if (!buf) {
LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
return false;
}
ggml_backend_buffer_clear(buf, 0);
bufs.emplace_back(buf);
}
return true;
}
bool llama_adapter_cvec::apply(
const llama_model & model,
const float * data,
size_t len,
int32_t n_embd,
int32_t il_start,
int32_t il_end) {
const auto & hparams = model.hparams;
if (data == nullptr) {
// disable the current control vector (but leave allocated for later)
layer_start = -1;
layer_end = -1;
return true;
}
if (n_embd != (int) hparams.n_embd) {
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
return false;
}
if (tensors.empty()) {
if (!init(model)) {
return false;
}
}
layer_start = il_start;
layer_end = il_end;
for (size_t il = 1; il < hparams.n_layer; il++) {
assert(tensors[il] != nullptr);
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
if (off + n_embd <= len) {
ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
}
}
return true;
}
// lora
llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) {
const std::string name(w->name);
const auto pos = ab_map.find(name);
if (pos != ab_map.end()) {
return &pos->second;
}
|
return nullptr;
}
|
static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
ggml_context * ctx_init;
gguf_init_params meta_gguf_params = {
/* .no_alloc = */ true,
/* .ctx = */ &ctx_init,
};
gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
if (!ctx_gguf) {
throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
}
ggml_context_ptr ctx { ctx_init };
// check metadata
{
const gguf_context * gguf_ctx = ctx_gguf.get();
LLAMA_LOG_INFO("%s: Dumping metadata keys/values.\n", __func__);
// get metadata as string
for (int i = 0; i < gguf_get_n_kv(gguf_ctx); i++) {
gguf_type type = gguf_get_kv_type(gguf_ctx, i);
const std::string type_name =
type == GGUF_TYPE_ARRAY
? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(gguf_ctx, i)), gguf_get_arr_n(gguf_ctx, i))
: gguf_type_name(type);
const char * name = gguf_get_key(gguf_ctx, i);
const std::string value = gguf_kv_to_str(gguf_ctx, i);
if (type != GGUF_TYPE_ARRAY) {
adapter.gguf_kv.emplace(name, value);
}
const size_t MAX_VALUE_LEN = 40;
std::string print_value = value.size() > MAX_VALUE_LEN ? format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str()) : value;
replace_all(print_value, "\n", "\\n");
LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), print_value.c_str());
}
auto get_kv_str = [&](const std::string & key) -> std::string {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? "" : std::string(gguf_get_val_str(gguf_ctx, id));
|
random
|
<|fim_prefix|>Update datapath and language requested for the last valid initialization.
datapath_ = std::move(datapath);
if (datapath_.empty() && !tesseract_->datadir.empty()) {
datapath_ = tesseract_->datadir;
}
language_ = language;
last_oem_requested_ = oem;
#ifndef DISABLED_LEGACY_ENGINE
// For same language and datapath, just reset the adaptive classifier.
if (reset_classifier) {
tesseract_->ResetAdaptiveClassifier();
}
#endif // ndef DISABLED_LEGACY_ENGINE
return 0;
}
/**
* Returns the languages string used in the last valid initialization.
* If the last initialization specified "deu+hin" then that will be
* returned. If hin loaded eng automatically as well, then that will
* not be included in this list. To find the languages actually
* loaded use GetLoadedLanguagesAsVector.
* The returned string should NOT be deleted.
*/
const char *TessBaseAPI::GetInitLanguagesAsString() const {
return language_.c_str();
}
/**
* Returns the loaded languages in the vector of std::string.
* Includes all languages loaded by the last Init, including those loaded
* as dependencies of other loaded languages.
*/
void TessBaseAPI::GetLoadedLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
langs->push_back(tesseract_->lang);
int num_subs = tesseract_->num_sub_langs();
for (int i = 0; i < num_subs; ++i) {
langs->push_back(tesseract_->get_sub_lang(i)->lang);
}
}
}
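// A minimal usage sketch (the printed language names are illustrative assumptions):
//   std::vector<std::string> langs;
//   api.GetLoadedLanguagesAsVector(&langs);
//   for (const auto &lang : langs) {
//     printf("loaded: %s\n", lang.c_str());  // e.g. "deu", "hin", "eng"
//   }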
/**
* Returns the available languages in the sorted vector of std::string.
*/
void TessBaseAPI::GetAvailableLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
addAvailableLanguages(tesseract_->datadir, langs);
std::sort(langs->begin(), langs->end());
}
}
/**
* Init only for page layout analysis. Use only for calls to SetImage and
* AnalysePage. Calls that attempt recognition will generate an error.
*/
void TessBaseAPI::InitForAnalysePage() {
<|fim_suffix|>
}
/**
* Read a "config" file containing a set of parameter name, value pairs.
* Searches the standard places: tessdata/configs, tessdata/tessconfigs
* and also accepts a relative or absolute path name.
*/
void TessBaseAPI::ReadConfigFile(const char *filename) {
tesseract_->read_config_file(filename, SET_PARAM_CONSTRAINT_NON_INIT_ONLY);
}
/** Same as above, but only set debug params from the given config file. */
void TessBaseAPI::ReadDebugConfigFile(const char *filename) {
tesseract_->read_config_file(filename, SET_PARAM_CONSTRAINT_DEBUG_ONLY);
}
/**
* Set the current page segmentation mode. Defaults to PSM_AUTO.
* The mode is stored as an IntParam so it can also be modified by
* ReadConfigFile or SetVariable("tessedit_pageseg_mode", mode as string).
*/
void TessBaseAPI::SetPageSegMode(PageSegMode mode) {
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
}
tesseract_->tessedit_pageseg_mode.set_value(mode);
}
/** Return the current page segmentation mode. */
PageSegMode TessBaseAPI::GetPageSegMode() const {
if (tesseract_ == nullptr) {
return PSM_SINGLE_BLOCK;
}
return static_cast<PageSegMode>(static_cast<int>(tesseract_->tessedit_pageseg_mode));
}
/**
* Recognize a rectangle from an image and return the result as a string.
* May be called many times for a single Init.
* Currently has no error checking.
* Greyscale of 8 and color of 24 or 32 bits per pixel may be given.
* Palette color images will not work properly and must be converted to
* 24 bit.
* Binary images of 1 bit per pixel may also be given but they must be
* byte packed with the MSB of the first byte being the first pixel, and a
* one pixel is WHITE. For binary images set bytes_per_pixel=0.
* The recognized text is returned as a char* which is coded
* as UTF8 and must be freed with the delete [] operator.
*/
char *TessBaseAPI::TesseractRect(const unsigned char *imagedata, int bytes_per_pixel,
int bytes_per_line, int <|fim_middle|>if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
#ifndef DISABLED_LEGACY_ENGINE
tesseract_->InitAdaptiveClassifier(nullptr);
#endif
}
|
Update datapath and language requested for the last valid initialization.
datapath_ = std::move(datapath);
if (datapath_.empty() && !tesseract_->datadir.empty()) {
datapath_ = tesseract_->datadir;
}
language_ = language;
last_oem_requested_ = oem;
#ifndef DISABLED_LEGACY_ENGINE
// For same language and datapath, just reset the adaptive classifier.
if (reset_classifier) {
tesseract_->ResetAdaptiveClassifier();
}
#endif // ndef DISABLED_LEGACY_ENGINE
return 0;
}
/**
* Returns the languages string used in the last valid initialization.
* If the last initialization specified "deu+hin" then that will be
* returned. If hin loaded eng automatically as well, then that will
* not be included in this list. To find the languages actually
* loaded use GetLoadedLanguagesAsVector.
* The returned string should NOT be deleted.
*/
const char *TessBaseAPI::GetInitLanguagesAsString() const {
return language_.c_str();
}
/**
* Returns the loaded languages in the vector of std::string.
* Includes all languages loaded by the last Init, including those loaded
* as dependencies of other loaded languages.
*/
void TessBaseAPI::GetLoadedLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
langs->push_back(tesseract_->lang);
int num_subs = tesseract_->num_sub_langs();
for (int i = 0; i < num_subs; ++i) {
langs->push_back(tesseract_->get_sub_lang(i)->lang);
}
}
}
/**
* Returns the available languages in the sorted vector of std::string.
*/
void TessBaseAPI::GetAvailableLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
addAvailableLanguages(tesseract_->datadir, langs);
std::sort(langs->begin(), langs->end());
}
}
/**
* Init only for page layout analysis. Use only for calls to SetImage and
* AnalysePage. Calls that attempt recognition will generate an error.
*/
void TessBaseAPI::InitForAnalysePage() {
|
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
#ifndef DISABLED_LEGACY_ENGINE
tesseract_->InitAdaptiveClassifier(nullptr);
#endif
}
|
}
/**
* Read a "config" file containing a set of parameter name, value pairs.
* Searches the standard places: tessdata/configs, tessdata/tessconfigs
* and also accepts a relative or absolute path name.
*/
void TessBaseAPI::ReadConfigFile(const char *filename) {
tesseract_->read_config_file(filename, SET_PARAM_CONSTRAINT_NON_INIT_ONLY);
}
/** Same as above, but only set debug params from the given config file. */
void TessBaseAPI::ReadDebugConfigFile(const char *filename) {
tesseract_->read_config_file(filename, SET_PARAM_CONSTRAINT_DEBUG_ONLY);
}
/**
* Set the current page segmentation mode. Defaults to PSM_AUTO.
* The mode is stored as an IntParam so it can also be modified by
* ReadConfigFile or SetVariable("tessedit_pageseg_mode", mode as string).
*/
void TessBaseAPI::SetPageSegMode(PageSegMode mode) {
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
}
tesseract_->tessedit_pageseg_mode.set_value(mode);
}
/** Return the current page segmentation mode. */
PageSegMode TessBaseAPI::GetPageSegMode() const {
if (tesseract_ == nullptr) {
return PSM_SINGLE_BLOCK;
}
return static_cast<PageSegMode>(static_cast<int>(tesseract_->tessedit_pageseg_mode));
}
/**
* Recognize a rectangle from an image and return the result as a string.
* May be called many times for a single Init.
* Currently has no error checking.
* Greyscale of 8 and color of 24 or 32 bits per pixel may be given.
* Palette color images will not work properly and must be converted to
* 24 bit.
* Binary images of 1 bit per pixel may also be given but they must be
* byte packed with the MSB of the first byte being the first pixel, and a
* one pixel is WHITE. For binary images set bytes_per_pixel=0.
* The recognized text is returned as a char* which is coded
* as UTF8 and must be freed with the delete [] operator.
*/
char *TessBaseAPI::TesseractRect(const unsigned char *imagedata, int bytes_per_pixel,
int bytes_per_line, int
|
ast_based
|
<|fim_prefix|>MP;
String stringver = String(dict["major"]) + "." + String(dict["minor"]);
if ((int)dict["patch"] != 0) {
stringver += "." + String(dict["patch"]);
}
stringver += "-" + String(dict["status"]) + " (" + String(dict["build"]) + ")";
dict["string"] = stringver;
return dict;
}
static Array array_from_info(const char *const *info_list) {
Array arr;
for (int i = 0; info_list[i] != nullptr; i++) {
arr.push_back(String::utf8(info_list[i]));
}
return arr;
}
static Array array_from_info_count(const char *const *info_list, int info_count) {
Array arr;
for (int i = 0; i < info_count; i++) {
arr.push_back(String::utf8(info_list[i]));
}
return arr;
}
Dictionary Engine::get_author_info() const {
Dictionary dict;
dict["lead_developers"] = array_from_info(AUTHORS_LEAD_DEVELOPERS);
dict["project_managers"] = array_from_info(AUTHORS_PROJECT_MANAGERS);
dict["founders"] = array_from_info(AUTHORS_FOUNDERS);
dict["developers"] = array_from_info(AUTHORS_DEVELOPERS);
return dict;
}
TypedArray<Dictionary> Engine::get_copyright_info() const {
TypedArray<Dictionary> components;
for (int component_index = 0; component_index < COPYRIGHT_INFO_COUNT; component_index++) {
const ComponentCopyright &cp_info = COPYRIGHT_INFO[component_index];
Dictionary component_dict;
component_dict["name"] = String::utf8(cp_info.name);
Array parts;
for (int i = 0; i < cp_info.part_count; i++) {
const ComponentCopyrightPart &cp_part = cp_info.parts[i];
Dictionary part_dict;
part_dict["files"] = array_from_info_count(cp_part.files, cp_part.file_count);
part_dict["copyright"] = array_from_info_count(cp_part.copyright_statements, cp_part.copyright_count);
part_dict["license"] = String::utf8(cp_part.license);
parts.push_back(part_dict);
}
component_dict["parts"] = parts;
components.push_back(component_dict);
}
return components;
}
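// Each returned component Dictionary holds a "name" and a "parts" Array; every part
// carries "files", "copyright" and "license" entries built from the COPYRIGHT_INFO tables.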
Dictionary Engine::get_donor_info() const {
Dictionary donors;
donors["patrons"] = array_from_info(DONORS_PATRONS);
<|fim_suffix|>
donors["gold_sponsors"] = array_from_info(DONORS_SPONSORS_GOLD);
donors["silver_sponsors"] = array_from_info(DONORS_SPONSORS_SILVER);
donors["diamond_members"] = array_from_info(DONORS_MEMBERS_DIAMOND);
donors["titanium_members"] = array_from_info(DONORS_MEMBERS_TITANIUM);
donors["platinum_members"] = array_from_info(DONORS_MEMBERS_PLATINUM);
donors["gold_members"] = array_from_info(DONORS_MEMBERS_GOLD);
return donors;
}
Dictionary Engine::get_license_info() const {
Dictionary licenses;
for (int i = 0; i < LICENSE_COUNT; i++) {
licenses[LICENSE_NAMES[i]] = LICENSE_BODIES[i];
}
return licenses;
}
String Engine::get_license_text() const {
return String(GODOT_LICENSE_TEXT);
}
String Engine::get_architecture_name() const {
#if defined(__x86_64) || defined(__x86_64__) || defined(__amd64__) || defined(_M_X64)
return "x86_64";
#elif defined(__i386) || defined(__i386__) || defined(_M_IX86)
return "x86_32";
#elif defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)
return "arm64";
#elif defined(__arm__) || defined(_M_ARM)
return "arm32";
#elif defined(__riscv)
return "rv64";
#elif defined(__powerpc64__)
return "ppc64";
#elif defined(__loongarch64)
return "loongarch64";
#elif defined(__wasm64__)
return "wasm64";
#elif defined(__wasm32__)
return "wasm32";
#endif
}
bool Engine::is_abort_on_gpu_errors_enabled() const {
return abort_on_gpu_errors;
}
int32_t Engine::get_gpu_index() const {
return gpu_idx;
}
bool Engine::is_validation_layers_enabled() const {
return use_validation_layers;
}
bool Engine::is_generate_spirv_debug_info_enabled() const {
return generate_spirv_debug_info;
}
bool Engine::is_extra_gpu_memory_tracking_enabled() const {
return extra_gpu_memory_tracking;
}
#if defined(DEBUG_ENABLED) || defined(DEV_ENABLED)
bool Engine::is_accurate_breadcrumbs_enabled() const {
return accurate_breadcrumbs;
}
#endif
void Engine::set_print_to_stdout(bool p_enabled) {
CoreGlobals::print_line_enabled = p_enabled;
}
bool Engine:<|fim_middle|>donors["platinum_sponsors"] = array_from_info(DONORS_SPONSORS_PLATINUM);
|
MP;
String stringver = String(dict["major"]) + "." + String(dict["minor"]);
if ((int)dict["patch"] != 0) {
stringver += "." + String(dict["patch"]);
}
stringver += "-" + String(dict["status"]) + " (" + String(dict["build"]) + ")";
dict["string"] = stringver;
return dict;
}
static Array array_from_info(const char *const *info_list) {
Array arr;
for (int i = 0; info_list[i] != nullptr; i++) {
arr.push_back(String::utf8(info_list[i]));
}
return arr;
}
static Array array_from_info_count(const char *const *info_list, int info_count) {
Array arr;
for (int i = 0; i < info_count; i++) {
arr.push_back(String::utf8(info_list[i]));
}
return arr;
}
Dictionary Engine::get_author_info() const {
Dictionary dict;
dict["lead_developers"] = array_from_info(AUTHORS_LEAD_DEVELOPERS);
dict["project_managers"] = array_from_info(AUTHORS_PROJECT_MANAGERS);
dict["founders"] = array_from_info(AUTHORS_FOUNDERS);
dict["developers"] = array_from_info(AUTHORS_DEVELOPERS);
return dict;
}
TypedArray<Dictionary> Engine::get_copyright_info() const {
TypedArray<Dictionary> components;
for (int component_index = 0; component_index < COPYRIGHT_INFO_COUNT; component_index++) {
const ComponentCopyright &cp_info = COPYRIGHT_INFO[component_index];
Dictionary component_dict;
component_dict["name"] = String::utf8(cp_info.name);
Array parts;
for (int i = 0; i < cp_info.part_count; i++) {
const ComponentCopyrightPart &cp_part = cp_info.parts[i];
Dictionary part_dict;
part_dict["files"] = array_from_info_count(cp_part.files, cp_part.file_count);
part_dict["copyright"] = array_from_info_count(cp_part.copyright_statements, cp_part.copyright_count);
part_dict["license"] = String::utf8(cp_part.license);
parts.push_back(part_dict);
}
component_dict["parts"] = parts;
components.push_back(component_dict);
}
return components;
}
Dictionary Engine::get_donor_info() const {
Dictionary donors;
donors["patrons"] = array_from_info(DONORS_PATRONS);
|
donors["platinum_sponsors"] = array_from_info(DONORS_SPONSORS_PLATINUM);
|
donors["gold_sponsors"] = array_from_info(DONORS_SPONSORS_GOLD);
donors["silver_sponsors"] = array_from_info(DONORS_SPONSORS_SILVER);
donors["diamond_members"] = array_from_info(DONORS_MEMBERS_DIAMOND);
donors["titanium_members"] = array_from_info(DONORS_MEMBERS_TITANIUM);
donors["platinum_members"] = array_from_info(DONORS_MEMBERS_PLATINUM);
donors["gold_members"] = array_from_info(DONORS_MEMBERS_GOLD);
return donors;
}
Dictionary Engine::get_license_info() const {
Dictionary licenses;
for (int i = 0; i < LICENSE_COUNT; i++) {
licenses[LICENSE_NAMES[i]] = LICENSE_BODIES[i];
}
return licenses;
}
String Engine::get_license_text() const {
return String(GODOT_LICENSE_TEXT);
}
String Engine::get_architecture_name() const {
#if defined(__x86_64) || defined(__x86_64__) || defined(__amd64__) || defined(_M_X64)
return "x86_64";
#elif defined(__i386) || defined(__i386__) || defined(_M_IX86)
return "x86_32";
#elif defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)
return "arm64";
#elif defined(__arm__) || defined(_M_ARM)
return "arm32";
#elif defined(__riscv)
return "rv64";
#elif defined(__powerpc64__)
return "ppc64";
#elif defined(__loongarch64)
return "loongarch64";
#elif defined(__wasm64__)
return "wasm64";
#elif defined(__wasm32__)
return "wasm32";
#endif
}
bool Engine::is_abort_on_gpu_errors_enabled() const {
return abort_on_gpu_errors;
}
int32_t Engine::get_gpu_index() const {
return gpu_idx;
}
bool Engine::is_validation_layers_enabled() const {
return use_validation_layers;
}
bool Engine::is_generate_spirv_debug_info_enabled() const {
return generate_spirv_debug_info;
}
bool Engine::is_extra_gpu_memory_tracking_enabled() const {
return extra_gpu_memory_tracking;
}
#if defined(DEBUG_ENABLED) || defined(DEV_ENABLED)
bool Engine::is_accurate_breadcrumbs_enabled() const {
return accurate_breadcrumbs;
}
#endif
void Engine::set_print_to_stdout(bool p_enabled) {
CoreGlobals::print_line_enabled = p_enabled;
}
bool Engine:
|
ast_based
|
<|fim_prefix|>mber + space)
* * kNumbersPerBlob plus the newline. Add to this the
* original UTF8 characters, and one kMaxBytesPerLine for safety.
*/
const int kBytesPerBoxFileLine = (kBytesPerNumber + 1) * kNumbersPerBlob + 1;
/** Max bytes in the decimal representation of int64_t. */
const int kBytesPer64BitNumber = 20;
/**
* A maximal single box could occupy kNumbersPerBlob numbers at
* kBytesPer64BitNumber digits (if someone sneaks in a 64 bit value) and a
* space plus the newline and the maximum length of a UNICHAR.
* Test against this on each iteration for safety.
*/
const int kMaxBytesPerLine = kNumbersPerBlob * (kBytesPer64BitNumber + 1) + 1 + UNICHAR_LEN;
/**
* The recognized text is returned as a char* which is coded
* as a UTF8 box file.
 * page_number is a 0-based page index that will appear in the box file.
* Returned string must be freed with the delete [] operator.
*/
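// Each emitted line has the form "<glyph> <left> <bottom> <right> <top> <page>", with
// the vertical coordinates flipped against image_height_ so the origin is the
// bottom-left corner of the page.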
char *TessBaseAPI::GetBoxText(int page_number) {
if (tesseract_ == nullptr || (!recognition_done_ && Recognize(nullptr) < 0)) {
return nullptr;
}
int blob_count;
int utf8_length = TextLength(&blob_count);
int total_length = blob_count * kBytesPerBoxFileLine + utf8_length + kMaxBytesPerLine;
char *result = new char[total_length];
result[0] = '\0';
int output_length = 0;
LTRResultIterator *it = GetLTRIterator();
do {
int left, top, right, bottom;
if (it->BoundingBox(RIL_SYMBOL, &left, &top, &right, &bottom)) {
const std::unique_ptr</*non-const*/ char[]> text(it->GetUTF8Text(RIL_SYMBOL));
// Tesseract uses space for recognition failure. Fix to a reject
      // character, kTesseractReject, so we don't create illegal box files.
for (int i = 0; text[i] != '\0'; ++i) {
if (text[i] == ' ') {
text[i] = kTesseractReject;
}
}
snprintf(result + output_length, total_length - output_length, "%s %d %d %d %d %d\n",
text.get(), left, image_height_ - bottom, right, image_height_ - top, page_number);
<|fim_suffix|>;
// Just in case...
if (output_length + kMaxBytesPerLine > total_length) {
break;
}
}
} while (it->Next(RIL_SYMBOL));
delete it;
return result;
}
/**
* Conversion table for non-latin characters.
* Maps characters out of the latin set into the latin set.
* TODO(rays) incorporate this translation into unicharset.
*/
const int kUniChs[] = {0x20ac, 0x201c, 0x201d, 0x2018, 0x2019, 0x2022, 0x2014, 0};
/** Latin chars corresponding to the unicode chars above. */
const int kLatinChs[] = {0x00a2, 0x0022, 0x0022, 0x0027, 0x0027, 0x00b7, 0x002d, 0};
/**
* The recognized text is returned as a char* which is coded
* as UNLV format Latin-1 with specific reject and suspect codes.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetUNLVText() {
if (tesseract_ == nullptr || (!recognition_done_ && Recognize(nullptr) < 0)) {
return nullptr;
}
bool tilde_crunch_written = false;
bool last_char_was_newline = true;
bool last_char_was_tilde = false;
int total_length = TextLength(nullptr);
PAGE_RES_IT page_res_it(page_res_);
char *result = new char[total_length];
char *ptr = result;
for (page_res_it.restart_page(); page_res_it.word() != nullptr; page_res_it.forward()) {
WERD_RES *word = page_res_it.word();
// Process the current word.
if (word->unlv_crunch_mode != CR_NONE) {
if (word->unlv_crunch_mode != CR_DELETE &&
(!tilde_crunch_written ||
(word->unlv_crunch_mode == CR_KEEP_SPACE && word->word->space() > 0 &&
!word->word->flag(W_FUZZY_NON) && !word->word->flag(W_FUZZY_SP)))) {
if (!word->word->flag(W_BOL) && word->word->space() > 0 && !word->word->flag(W_FUZZY_NON) &&
!word->word->flag(W_FUZZY_SP)) {
/* Write a space to separate from preceding good text */
*ptr++ = ' ';
last_char_was_tilde = false;
}
if (!last_char_was_tilde) {
// Write a reject char.
<|fim_middle|>output_length += strlen(result + output_length)
|
mber + space)
* * kNumbersPerBlob plus the newline. Add to this the
* original UTF8 characters, and one kMaxBytesPerLine for safety.
*/
const int kBytesPerBoxFileLine = (kBytesPerNumber + 1) * kNumbersPerBlob + 1;
/** Max bytes in the decimal representation of int64_t. */
const int kBytesPer64BitNumber = 20;
/**
* A maximal single box could occupy kNumbersPerBlob numbers at
* kBytesPer64BitNumber digits (if someone sneaks in a 64 bit value) and a
* space plus the newline and the maximum length of a UNICHAR.
* Test against this on each iteration for safety.
*/
const int kMaxBytesPerLine = kNumbersPerBlob * (kBytesPer64BitNumber + 1) + 1 + UNICHAR_LEN;
/**
* The recognized text is returned as a char* which is coded
* as a UTF8 box file.
 * page_number is a 0-based page index that will appear in the box file.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetBoxText(int page_number) {
if (tesseract_ == nullptr || (!recognition_done_ && Recognize(nullptr) < 0)) {
return nullptr;
}
int blob_count;
int utf8_length = TextLength(&blob_count);
int total_length = blob_count * kBytesPerBoxFileLine + utf8_length + kMaxBytesPerLine;
char *result = new char[total_length];
result[0] = '\0';
int output_length = 0;
LTRResultIterator *it = GetLTRIterator();
do {
int left, top, right, bottom;
if (it->BoundingBox(RIL_SYMBOL, &left, &top, &right, &bottom)) {
const std::unique_ptr</*non-const*/ char[]> text(it->GetUTF8Text(RIL_SYMBOL));
// Tesseract uses space for recognition failure. Fix to a reject
      // character, kTesseractReject, so we don't create illegal box files.
for (int i = 0; text[i] != '\0'; ++i) {
if (text[i] == ' ') {
text[i] = kTesseractReject;
}
}
snprintf(result + output_length, total_length - output_length, "%s %d %d %d %d %d\n",
text.get(), left, image_height_ - bottom, right, image_height_ - top, page_number);
|
output_length += strlen(result + output_length)
|
;
// Just in case...
if (output_length + kMaxBytesPerLine > total_length) {
break;
}
}
} while (it->Next(RIL_SYMBOL));
delete it;
return result;
}
/**
* Conversion table for non-latin characters.
* Maps characters out of the latin set into the latin set.
* TODO(rays) incorporate this translation into unicharset.
*/
const int kUniChs[] = {0x20ac, 0x201c, 0x201d, 0x2018, 0x2019, 0x2022, 0x2014, 0};
/** Latin chars corresponding to the unicode chars above. */
const int kLatinChs[] = {0x00a2, 0x0022, 0x0022, 0x0027, 0x0027, 0x00b7, 0x002d, 0};
/**
* The recognized text is returned as a char* which is coded
* as UNLV format Latin-1 with specific reject and suspect codes.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetUNLVText() {
if (tesseract_ == nullptr || (!recognition_done_ && Recognize(nullptr) < 0)) {
return nullptr;
}
bool tilde_crunch_written = false;
bool last_char_was_newline = true;
bool last_char_was_tilde = false;
int total_length = TextLength(nullptr);
PAGE_RES_IT page_res_it(page_res_);
char *result = new char[total_length];
char *ptr = result;
for (page_res_it.restart_page(); page_res_it.word() != nullptr; page_res_it.forward()) {
WERD_RES *word = page_res_it.word();
// Process the current word.
if (word->unlv_crunch_mode != CR_NONE) {
if (word->unlv_crunch_mode != CR_DELETE &&
(!tilde_crunch_written ||
(word->unlv_crunch_mode == CR_KEEP_SPACE && word->word->space() > 0 &&
!word->word->flag(W_FUZZY_NON) && !word->word->flag(W_FUZZY_SP)))) {
if (!word->word->flag(W_BOL) && word->word->space() > 0 && !word->word->flag(W_FUZZY_NON) &&
!word->word->flag(W_FUZZY_SP)) {
/* Write a space to separate from preceding good text */
*ptr++ = ' ';
last_char_was_tilde = false;
}
if (!last_char_was_tilde) {
// Write a reject char.
|
ast_based
|
<|fim_prefix|> accesskit_node_clear_label(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_extra_info(const RID &p_id, const String &p_name_extra_info) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
ae->name_extra_info = p_name_extra_info;
String full_name = ae->name + " " + ae->name_extra_info;
if (!full_name.is_empty()) {
accesskit_node_set_label(ae->node, full_name.utf8().ptr());
} else {
accesskit_node_clear_label(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_description(const RID &p_id, const String &p_description) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_description.is_empty()) {
accesskit_node_set_description(ae->node, p_description.utf8().ptr());
} else {
accesskit_node_clear_description(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_value(const RID &p_id, const String &p_value) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_value.is_empty()) {
Vector<uint8_t> ch_length;
accesskit_node_set_value(ae->node, p_value.utf8(&ch_length).ptr());
accesskit_node_set_character_lengths(ae->node, ch_length.size(), ch_length.ptr());
} else {
accesskit_node_clear_value(ae->node);
accesskit_node_clear_character_lengths(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_tooltip(const RID &p_id, const String &p_tooltip) {<|fim_suffix|>
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_tooltip.is_empty()) {
accesskit_node_set_tooltip(ae->node, p_tooltip.utf8().ptr());
} else {
accesskit_node_clear_tooltip(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_bounds(const RID &p_id, const Rect2 &p_rect) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_rect rect;
rect.x0 = p_rect.position.x;
rect.y0 = p_rect.position.y;
rect.x1 = p_rect.position.x + p_rect.size.x;
rect.y1 = p_rect.position.y + p_rect.size.y;
accesskit_node_set_bounds(ae->node, rect);
}
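// The Rect2 position/size pair is converted to AccessKit's corner form:
// (x0, y0) = position and (x1, y1) = position + size.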
void AccessibilityDriverAccessKit::accessibility_update_set_transform(const RID &p_id, const Transform2D &p_transform) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_affine transform = { p_transform.columns[0][0], p_transform.columns[0][1], p_transform.columns[1][0], p_transform.columns[1][1], p_transform.columns[2][0], p_transform.columns[2][1] };
accesskit_node_set_transform(ae->node, transform);
}
void AccessibilityDriverAccessKit::accessibility_update_add_child(const RID &p_id, const RID &p_child_id) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
AccessibilityElement *other_ae = rid_owner.get_or_null(p_child_id);
ERR_FAIL_NULL(other_ae);
ERR_FAIL_COND(other_ae->window_id != ae->window_id);
_ensure_node(p_id, ae);
<|fim_middle|> ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
|
accesskit_node_clear_label(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_extra_info(const RID &p_id, const String &p_name_extra_info) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
ae->name_extra_info = p_name_extra_info;
String full_name = ae->name + " " + ae->name_extra_info;
if (!full_name.is_empty()) {
accesskit_node_set_label(ae->node, full_name.utf8().ptr());
} else {
accesskit_node_clear_label(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_description(const RID &p_id, const String &p_description) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_description.is_empty()) {
accesskit_node_set_description(ae->node, p_description.utf8().ptr());
} else {
accesskit_node_clear_description(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_value(const RID &p_id, const String &p_value) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_value.is_empty()) {
Vector<uint8_t> ch_length;
accesskit_node_set_value(ae->node, p_value.utf8(&ch_length).ptr());
accesskit_node_set_character_lengths(ae->node, ch_length.size(), ch_length.ptr());
} else {
accesskit_node_clear_value(ae->node);
accesskit_node_clear_character_lengths(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_tooltip(const RID &p_id, const String &p_tooltip) {
|
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
|
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_tooltip.is_empty()) {
accesskit_node_set_tooltip(ae->node, p_tooltip.utf8().ptr());
} else {
accesskit_node_clear_tooltip(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_bounds(const RID &p_id, const Rect2 &p_rect) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_rect rect;
rect.x0 = p_rect.position.x;
rect.y0 = p_rect.position.y;
rect.x1 = p_rect.position.x + p_rect.size.x;
rect.y1 = p_rect.position.y + p_rect.size.y;
accesskit_node_set_bounds(ae->node, rect);
}
void AccessibilityDriverAccessKit::accessibility_update_set_transform(const RID &p_id, const Transform2D &p_transform) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_affine transform = { p_transform.columns[0][0], p_transform.columns[0][1], p_transform.columns[1][0], p_transform.columns[1][1], p_transform.columns[2][0], p_transform.columns[2][1] };
accesskit_node_set_transform(ae->node, transform);
}
void AccessibilityDriverAccessKit::accessibility_update_add_child(const RID &p_id, const RID &p_child_id) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
AccessibilityElement *other_ae = rid_owner.get_or_null(p_child_id);
ERR_FAIL_NULL(other_ae);
ERR_FAIL_COND(other_ae->window_id != ae->window_id);
_ensure_node(p_id, ae);
|
random
|
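The AccessKit row above converts a Rect2 (position plus size) into an accesskit_rect that stores two opposite corners. A tiny standalone sketch of that same mapping, using stand-in struct names rather than the real engine/AccessKit types (which are assumptions here):

#include <cassert>

struct Rect2Like { double x, y, w, h; };              // stand-in for Rect2 (assumption)
struct AccessKitRectLike { double x0, y0, x1, y1; };  // stand-in for accesskit_rect (assumption)

// Same mapping as accessibility_update_set_bounds(): position + size -> opposite corners.
static AccessKitRectLike to_corners(const Rect2Like &r) {
    return { r.x, r.y, r.x + r.w, r.y + r.h };
}

int main() {
    AccessKitRectLike out = to_corners({ 10, 20, 100, 50 });
    assert(out.x0 == 10 && out.y0 == 20 && out.x1 == 110 && out.y1 == 70);
    return 0;
}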
<|fim_prefix|>// Missing features or Issues:
// [ ] Renderer: The renderer is suboptimal as we need to unindex our buffers and convert vertices manually.
// [ ] Platform: Missing gamepad support.
// You can use unmodified imgui_impl_* files in your project. See examples/ folder for examples of using this.
// Prefer including the entire imgui/ repository into your project (either as a copy or as a submodule), and only build the backends you need.
// Learn about Dear ImGui:
// - FAQ https://dearimgui.com/faq
// - Getting Started https://dearimgui.com/getting-started
// - Documentation https://dearimgui.com/docs (same as your local docs/ folder).
// - Introduction, links and more at the top of imgui.cpp
// CHANGELOG
// (minor and older changes stripped away, please see git history for details)
// 2025-09-18: Call platform_io.ClearRendererHandlers() and platform_io.ClearPlatformHandlers() on shutdown.
// 2025-08-12: Inputs: fixed missing support for ImGuiKey_PrintScreen under Windows, as raw Allegro 5 does not receive it.
// 2025-08-12: Added ImGui_ImplAllegro5_SetDisplay() function to change current ALLEGRO_DISPLAY, as Allegro applications often need to do that.
// 2025-07-07: Fixed texture update broken on some platforms where ALLEGRO_LOCK_WRITEONLY needed all texels to be rewritten.
// 2025-06-11: Added support for ImGuiBackendFlags_RendererHasTextures, for dynamic font atlas. Removed ImGui_ImplSDLGPU3_CreateFontsTexture() and ImGui_ImplSDLGPU3_DestroyFontsTexture().
// 2025-02-18: Added ImGuiMouseCursor_Wait and ImGuiMouseCursor_Progress mouse cursor support.
// 2025-01-06: Avoid calling al_set_mouse_cursor() repeatedly since it appears to leak on X11 (#8256).
// 2024-08-22: moved some OS/backend related function pointers from ImGuiIO to ImGuiPlatformIO:
// - io.GetClipboardTextFn -> platform_io.Platform_GetClipboardTextFn
// - io.SetClipboardTextFn -> platform_io.Platform_SetClipboardTextFn<|fim_suffix|>// 2022-01-26: Inputs: replaced short-lived io.AddKeyModsEvent() (added two weeks ago) with io.AddKeyEvent() using ImGuiKey_ModXXX flags. Sorry for the confusion.
// 2022-01-17: Inputs: calling new io.AddMousePosEvent(), io.AddMouseButtonEvent(), io.AddMouseWheelEvent() API (1.87+).
// 2022-01-17: Inputs: always calling io.AddKeyModsEvent() next and before key event (not in NewFrame) to fix input queue with very low framerates.
// 2022-01-10: Inputs: calling new io.AddKeyEvent(), io.AddKeyModsEvent() + io.SetKeyEventNativeData() API (1.87+). Support for full ImGuiKey range.
// 2021-12-08: Renderer: Fixed mishandling of the ImDrawCmd::IdxOffset field! This is an old bug but it never had an effect until some internal rendering changes in 1.86.
// 2021-08-17: Calling io.AddFocusEvent() on ALLEGRO_EVENT_DISPLAY_SWITCH_OUT/ALLEGRO_EVENT_DISPLAY_SWITCH_IN events.
// 2021-06-29: Reorganized backend to pull data from a single structure to facilitate usage with multiple-contexts (all g_XXXX access changed to bd->XXXX).
// 2021-05-19: Renderer: Replaced direct access to ImDrawCmd::TextureId with a call to ImDrawCmd::GetTexID(). (will become a requirement)
// 2021-02-18: Change blending equation to preserve alpha in output buffer.
// 2020-08-10: Inputs: Fixed horizontal mouse wheel direction.
// 2019-12-05: Inputs: Added support for ImGuiMouseCursor_NotAllowed mouse cursor.
// 2019-07-21: Inputs: Added mapping for ImGuiKey_KeyPadEnter.
// 2019-05-11: Inputs: Don't filter character value from ALLEGRO_EVENT_KEY_CHAR before calling AddInputCharacter().
// 2019-04-30: Renderer: Added support for special ImDrawCallback_ResetRenderState callback to reset render state.
// 2018-11-30: Platform: Added touchscreen support.
// 2018-11-30: Misc: Setting up io.BackendPlatformName/io.BackendRendererName so they can be displayed in the About Window.
// 2018-06-13: Platform: Added clipboard support (from Allegro 5.1.12).<|fim_middle|>// 2022-11-30: Renderer: Restoring using al_draw_indexed_prim() when Allegro version is >= 5.2.5.
// 2022-10-11: Using 'nullptr' instead of 'NULL' as per our switch to C++11.
// 2022-09-26: Inputs: Renamed ImGuiKey_ModXXX introduced in 1.87 to ImGuiMod_XXX (old names still supported).
|
// Missing features or Issues:
// [ ] Renderer: The renderer is suboptimal as we need to unindex our buffers and convert vertices manually.
// [ ] Platform: Missing gamepad support.
// You can use unmodified imgui_impl_* files in your project. See examples/ folder for examples of using this.
// Prefer including the entire imgui/ repository into your project (either as a copy or as a submodule), and only build the backends you need.
// Learn about Dear ImGui:
// - FAQ https://dearimgui.com/faq
// - Getting Started https://dearimgui.com/getting-started
// - Documentation https://dearimgui.com/docs (same as your local docs/ folder).
// - Introduction, links and more at the top of imgui.cpp
// CHANGELOG
// (minor and older changes stripped away, please see git history for details)
// 2025-09-18: Call platform_io.ClearRendererHandlers() and platform_io.ClearPlatformHandlers() on shutdown.
// 2025-08-12: Inputs: fixed missing support for ImGuiKey_PrintScreen under Windows, as raw Allegro 5 does not receive it.
// 2025-08-12: Added ImGui_ImplAllegro5_SetDisplay() function to change current ALLEGRO_DISPLAY, as Allegro applications often need to do that.
// 2025-07-07: Fixed texture update broken on some platforms where ALLEGRO_LOCK_WRITEONLY needed all texels to be rewritten.
// 2025-06-11: Added support for ImGuiBackendFlags_RendererHasTextures, for dynamic font atlas. Removed ImGui_ImplSDLGPU3_CreateFontsTexture() and ImGui_ImplSDLGPU3_DestroyFontsTexture().
// 2025-02-18: Added ImGuiMouseCursor_Wait and ImGuiMouseCursor_Progress mouse cursor support.
// 2025-01-06: Avoid calling al_set_mouse_cursor() repeatedly since it appears to leak on X11 (#8256).
// 2024-08-22: moved some OS/backend related function pointers from ImGuiIO to ImGuiPlatformIO:
// - io.GetClipboardTextFn -> platform_io.Platform_GetClipboardTextFn
// - io.SetClipboardTextFn -> platform_io.Platform_SetClipboardTextFn
|
// 2022-11-30: Renderer: Restoring using al_draw_indexed_prim() when Allegro version is >= 5.2.5.
// 2022-10-11: Using 'nullptr' instead of 'NULL' as per our switch to C++11.
// 2022-09-26: Inputs: Renamed ImGuiKey_ModXXX introduced in 1.87 to ImGuiMod_XXX (old names still supported).
|
// 2022-01-26: Inputs: replaced short-lived io.AddKeyModsEvent() (added two weeks ago) with io.AddKeyEvent() using ImGuiKey_ModXXX flags. Sorry for the confusion.
// 2022-01-17: Inputs: calling new io.AddMousePosEvent(), io.AddMouseButtonEvent(), io.AddMouseWheelEvent() API (1.87+).
// 2022-01-17: Inputs: always calling io.AddKeyModsEvent() next and before key event (not in NewFrame) to fix input queue with very low framerates.
// 2022-01-10: Inputs: calling new io.AddKeyEvent(), io.AddKeyModsEvent() + io.SetKeyEventNativeData() API (1.87+). Support for full ImGuiKey range.
// 2021-12-08: Renderer: Fixed mishandling of the ImDrawCmd::IdxOffset field! This is an old bug but it never had an effect until some internal rendering changes in 1.86.
// 2021-08-17: Calling io.AddFocusEvent() on ALLEGRO_EVENT_DISPLAY_SWITCH_OUT/ALLEGRO_EVENT_DISPLAY_SWITCH_IN events.
// 2021-06-29: Reorganized backend to pull data from a single structure to facilitate usage with multiple-contexts (all g_XXXX access changed to bd->XXXX).
// 2021-05-19: Renderer: Replaced direct access to ImDrawCmd::TextureId with a call to ImDrawCmd::GetTexID(). (will become a requirement)
// 2021-02-18: Change blending equation to preserve alpha in output buffer.
// 2020-08-10: Inputs: Fixed horizontal mouse wheel direction.
// 2019-12-05: Inputs: Added support for ImGuiMouseCursor_NotAllowed mouse cursor.
// 2019-07-21: Inputs: Added mapping for ImGuiKey_KeyPadEnter.
// 2019-05-11: Inputs: Don't filter character value from ALLEGRO_EVENT_KEY_CHAR before calling AddInputCharacter().
// 2019-04-30: Renderer: Added support for special ImDrawCallback_ResetRenderState callback to reset render state.
// 2018-11-30: Platform: Added touchscreen support.
// 2018-11-30: Misc: Setting up io.BackendPlatformName/io.BackendRendererName so they can be displayed in the About Window.
// 2018-06-13: Platform: Added clipboard support (from Allegro 5.1.12).
|
random
|
<|fim_prefix|>e.\n");
ASSERT_HOST(false);
default:
break;
}
const std::unique_ptr<const char[]> para_text(it->GetUTF8Text(RIL_PARA));
text += para_text.get();
} while (it->Next(RIL_PARA));
return copy_string(text);
}
static void AddBoxToTSV(const PageIterator *it, PageIteratorLevel level, std::string &text) {
int left, top, right, bottom;
it->BoundingBox(level, &left, &top, &right, &bottom);
text += "\t" + std::to_string(left);
text += "\t" + std::to_string(top);
text += "\t" + std::to_string(right - left);
text += "\t" + std::to_string(bottom - top);
}
/**
* Make a TSV-formatted string from the internal data structures.
* page_number is 0-based but will appear in the output as 1-based.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetTSVText(int page_number) {
if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(nullptr) < 0)) {
return nullptr;
}
#if !defined(NDEBUG)
int lcnt = 1, bcnt = 1, pcnt = 1, wcnt = 1;
#endif
int page_id = page_number + 1; // we use 1-based page numbers.
int page_num = page_id;
int block_num = 0;
int par_num = 0;
int line_num = 0;
int word_num = 0;
std::string tsv_str;
tsv_str += "1\t" + std::to_string(page_num); // level 1 - page
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(rect_left_);
tsv_str += "\t" + std::to_string(rect_top_);
tsv_str += "\t" + std::to_string(rect_width_);
tsv_str += "\t" + std::to_string(rect_height_);
tsv_str += "\t-1\t\n";
const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
}
// Add rows for any new block/paragraph/textline.
if (res_it->IsAtBeginningOf(RIL_BLOCK)) <|fim_suffix|>
word_num = 0;
tsv_str += "2\t" + std::to_string(page_num); // level 2 - block
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for block
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
par_num++;
line_num = 0;
word_num = 0;
tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for para
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
line_num++;
word_num = 0;
tsv_str += "4\t" + std::to_string(page_num); // level 4 - line
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_TEXTLINE, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for line
}
// Now, process the word...
int left, top, right, bottom;
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
word_num++;
tsv_str += "5\t" + std::to_string(page_num); // level 5 - word
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(left);
tsv_str += "\t" + std::to_string(top);
tsv_str += "\t" + std::to_string(right - left);
tsv_str += "\t" + std::to_string(bottom - top);
tsv_str += "\t" + std::to_string(res_it<|fim_middle|>{
block_num++;
par_num = 0;
line_num = 0;
|
e.\n");
ASSERT_HOST(false);
default:
break;
}
const std::unique_ptr<const char[]> para_text(it->GetUTF8Text(RIL_PARA));
text += para_text.get();
} while (it->Next(RIL_PARA));
return copy_string(text);
}
static void AddBoxToTSV(const PageIterator *it, PageIteratorLevel level, std::string &text) {
int left, top, right, bottom;
it->BoundingBox(level, &left, &top, &right, &bottom);
text += "\t" + std::to_string(left);
text += "\t" + std::to_string(top);
text += "\t" + std::to_string(right - left);
text += "\t" + std::to_string(bottom - top);
}
/**
* Make a TSV-formatted string from the internal data structures.
* page_number is 0-based but will appear in the output as 1-based.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetTSVText(int page_number) {
if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(nullptr) < 0)) {
return nullptr;
}
#if !defined(NDEBUG)
int lcnt = 1, bcnt = 1, pcnt = 1, wcnt = 1;
#endif
int page_id = page_number + 1; // we use 1-based page numbers.
int page_num = page_id;
int block_num = 0;
int par_num = 0;
int line_num = 0;
int word_num = 0;
std::string tsv_str;
tsv_str += "1\t" + std::to_string(page_num); // level 1 - page
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(rect_left_);
tsv_str += "\t" + std::to_string(rect_top_);
tsv_str += "\t" + std::to_string(rect_width_);
tsv_str += "\t" + std::to_string(rect_height_);
tsv_str += "\t-1\t\n";
const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
}
// Add rows for any new block/paragraph/textline.
if (res_it->IsAtBeginningOf(RIL_BLOCK))
|
{
block_num++;
par_num = 0;
line_num = 0;
|
word_num = 0;
tsv_str += "2\t" + std::to_string(page_num); // level 2 - block
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for block
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
par_num++;
line_num = 0;
word_num = 0;
tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for para
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
line_num++;
word_num = 0;
tsv_str += "4\t" + std::to_string(page_num); // level 4 - line
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_TEXTLINE, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for line
}
// Now, process the word...
int left, top, right, bottom;
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
word_num++;
tsv_str += "5\t" + std::to_string(page_num); // level 5 - word
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(left);
tsv_str += "\t" + std::to_string(top);
tsv_str += "\t" + std::to_string(right - left);
tsv_str += "\t" + std::to_string(bottom - top);
tsv_str += "\t" + std::to_string(res_it
|
ast_based
|
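The Tesseract row above assembles the TSV output level by level (level 1 = page down to level 5 = word) and its docstring notes that the returned buffer must be released with delete[]. A minimal, hedged caller sketch; the Init()/SetImage() setup and the "page.png" input are assumptions, not part of the excerpt:

// Hedged usage sketch for GetTSVText(); everything outside the excerpt is an assumption.
#include <leptonica/allheaders.h>
#include <tesseract/baseapi.h>
#include <cstdio>

int main() {
    tesseract::TessBaseAPI api;
    if (api.Init(nullptr, "eng") != 0) {   // default tessdata path, English
        return 1;
    }
    Pix *pix = pixRead("page.png");        // hypothetical input image
    api.SetImage(pix);
    char *tsv = api.GetTSVText(0);         // page_number is 0-based, printed as 1-based
    if (tsv != nullptr) {
        std::fputs(tsv, stdout);
        delete[] tsv;                      // per the docstring: free with delete[]
    }
    pixDestroy(&pix);
    api.End();
    return 0;
}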
<|fim_prefix|>"Animation Paste Keys"));
bool same_track = true;
bool all_compatible = true;
for (int i = 0; i < editor->key_clipboard.keys.size(); i++) {
const AnimationTrackEditor::KeyClipboard::Key key = editor->key_clipboard.keys[i];
if (key.track != 0) {
same_track = false;
break;
}
if (!editor->_is_track_compatible(selected_track, key.value.get_type(), key.track_type)) {
all_compatible = false;
break;
}
}
ERR_FAIL_COND_MSG(!all_compatible, "Paste failed: Not all animation keys were compatible with their target tracks");
if (!same_track) {
WARN_PRINT("Pasted animation keys from multiple tracks into single Bezier track");
}
List<Pair<int, float>> new_selection_values;
for (int i = 0; i < editor->key_clipboard.keys.size(); i++) {
const AnimationTrackEditor::KeyClipboard::Key key = editor->key_clipboard.keys[i];
float insert_pos = p_ofs_valid ? p_ofs : timeline->get_play_position();
if (p_ofs_valid) {
if (editor->snap_keys->is_pressed() && editor->step->get_value() != 0) {
insert_pos = editor->snap_time(insert_pos);
}
}
float dst_time = key.time + insert_pos;
int existing_idx = animation->track_find_key(selected_track, dst_time, Animation::FIND_MODE_APPROX);
Variant value = key.value;
if (key.track_type != Animation::TYPE_BEZIER) {
value = animation->make_default_bezier_key(key.value);
}
undo_redo->add_do_method(animation.ptr(), "track_insert_key", selected_track, dst_time, value, key.transition);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", selected_track, dst_time);
Pair<int, float> p;
p.first = selected_track;
p.second = dst_time;
new_selection_values.push_back(p);
if (existing_idx != -1) {
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", selected_track, dst_time, animation->track_get_key_value(selected_track, existing_idx), animation->track_get_key_transition(selected_track, existing_idx));
}
}
<|fim_suffix|>;
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect pasted.
int i = 0;
for (const Pair<int, float> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, animation->track_get_key_time(E->get().first, E->get().second), i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->add_do_method(this, "queue_redraw");
undo_redo->add_undo_method(this, "queue_redraw");
undo_redo->commit_action();
}
}
void AnimationBezierTrackEdit::delete_selection() {
if (selection.size()) {
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Animation Delete Keys"));
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_do_method(animation.ptr(), "track_remove_key", E->get().first, E->get().second);
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", E->get().first, animation->track_get_key_time(E->get().first, E->get().second), animation->track_get_key_value(E->get().first, E->get().second), 1);
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
//selection.clear();
}
}
void AnimationBezierTrackEdit::_bezier_track_insert_key_at_anim(const Ref<Animati<|fim_middle|>undo_redo->add_do_method(this, "_clear_selection_for_anim", animation)
|
"Animation Paste Keys"));
bool same_track = true;
bool all_compatible = true;
for (int i = 0; i < editor->key_clipboard.keys.size(); i++) {
const AnimationTrackEditor::KeyClipboard::Key key = editor->key_clipboard.keys[i];
if (key.track != 0) {
same_track = false;
break;
}
if (!editor->_is_track_compatible(selected_track, key.value.get_type(), key.track_type)) {
all_compatible = false;
break;
}
}
ERR_FAIL_COND_MSG(!all_compatible, "Paste failed: Not all animation keys were compatible with their target tracks");
if (!same_track) {
WARN_PRINT("Pasted animation keys from multiple tracks into single Bezier track");
}
List<Pair<int, float>> new_selection_values;
for (int i = 0; i < editor->key_clipboard.keys.size(); i++) {
const AnimationTrackEditor::KeyClipboard::Key key = editor->key_clipboard.keys[i];
float insert_pos = p_ofs_valid ? p_ofs : timeline->get_play_position();
if (p_ofs_valid) {
if (editor->snap_keys->is_pressed() && editor->step->get_value() != 0) {
insert_pos = editor->snap_time(insert_pos);
}
}
float dst_time = key.time + insert_pos;
int existing_idx = animation->track_find_key(selected_track, dst_time, Animation::FIND_MODE_APPROX);
Variant value = key.value;
if (key.track_type != Animation::TYPE_BEZIER) {
value = animation->make_default_bezier_key(key.value);
}
undo_redo->add_do_method(animation.ptr(), "track_insert_key", selected_track, dst_time, value, key.transition);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", selected_track, dst_time);
Pair<int, float> p;
p.first = selected_track;
p.second = dst_time;
new_selection_values.push_back(p);
if (existing_idx != -1) {
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", selected_track, dst_time, animation->track_get_key_value(selected_track, existing_idx), animation->track_get_key_transition(selected_track, existing_idx));
}
}
|
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation)
|
;
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect pasted.
int i = 0;
for (const Pair<int, float> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, animation->track_get_key_time(E->get().first, E->get().second), i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->add_do_method(this, "queue_redraw");
undo_redo->add_undo_method(this, "queue_redraw");
undo_redo->commit_action();
}
}
void AnimationBezierTrackEdit::delete_selection() {
if (selection.size()) {
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Animation Delete Keys"));
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_do_method(animation.ptr(), "track_remove_key", E->get().first, E->get().second);
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", E->get().first, animation->track_get_key_time(E->get().first, E->get().second), animation->track_get_key_value(E->get().first, E->get().second), 1);
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
//selection.clear();
}
}
void AnimationBezierTrackEdit::_bezier_track_insert_key_at_anim(const Ref<Animati
|
ast_based
|
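The paste and delete code above registers a mirrored add_undo_method() for every add_do_method() and only applies the action when commit_action() runs. A minimal sketch of that EditorUndoRedoManager pattern; it assumes a Ref<Animation> named `animation` is in scope, and the track index, time and value are placeholders rather than values from the excerpt:

// Hedged sketch of the do/undo pairing used by the Bezier editor above.
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Insert Key (example)"));
undo_redo->add_do_method(animation.ptr(), "track_insert_key", /*track*/ 0, /*time*/ 1.0, /*value*/ 0.0, /*transition*/ 1.0);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", /*track*/ 0, /*time*/ 1.0);
undo_redo->commit_action(); // the "do" calls run here; undo/redo later replay the registered methods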
<|fim_prefix|>v->set_shift_pressed(shift_mem);
}
if (p_keycode != Key::ALT) {
ev->set_alt_pressed(alt_mem);
}
if (p_keycode != Key::META) {
ev->set_meta_pressed(meta_mem);
}
if (p_keycode != Key::CTRL) {
ev->set_ctrl_pressed(control_mem);
}
}
void AndroidInputHandler::process_key_event(int p_physical_keycode, int p_unicode, int p_key_label, bool p_pressed, bool p_echo) {
static char32_t prev_wc = 0;
char32_t unicode = p_unicode;
if ((p_unicode & 0xfffffc00) == 0xd800) {
if (prev_wc != 0) {
ERR_PRINT("invalid utf16 surrogate input");
}
prev_wc = unicode;
return; // Skip surrogate.
} else if ((unicode & 0xfffffc00) == 0xdc00) {
if (prev_wc == 0) {
ERR_PRINT("invalid utf16 surrogate input");
return; // Skip invalid surrogate.
}
unicode = (prev_wc << 10UL) + unicode - ((0xd800 << 10UL) + 0xdc00 - 0x10000);
prev_wc = 0;
} else {
prev_wc = 0;
}
Ref<InputEventKey> ev;
ev.instantiate();
Key physical_keycode = godot_code_from_android_code(p_physical_keycode);
Key keycode;
if (unicode == '\b') { // 0x08
keycode = Key::BACKSPACE;
} else if (unicode == '\t') { // 0x09
keycode = Key::TAB;
} else if (unicode == '\n') { // 0x0A
keycode = Key::ENTER;
} else if (unicode == 0x1B) {
keycode = Key::ESCAPE;
} else if (unicode == 0x1F) {
keycode = Key::KEY_DELETE;
} else {
keycode = fix_keycode(unicode, physical_keycode);
}
switch (physical_keycode) {
case Key::SHIFT: {
shift_mem = p_pressed;
} break;
case Key::ALT: {
alt_mem = p_pressed;
} break;
case Key::CTRL: {
control_mem = p_pressed;
} break;
case Key::META: {
meta_mem = p_pressed;
} break;
default:
break;
}
ev->set_keycode(keycode);
ev->set_physical_keycode(physical_keycode);
ev->set_key_label(fix_key_label(p_key_label, keycode));
ev->set_unicode(fix_unicode(unicode));
ev->set_location(godot_location_from_android_code(p_physical_keycode));
ev->set_pressed(p_pressed);
ev->set_echo(p_echo);
_set_key_modifier_state(ev, keycode);
<|fim_suffix|>
Input::get_singleton()->parse_input_event(ev);
}
void AndroidInputHandler::_cancel_all_touch() {
_parse_all_touch(false, true);
touch.clear();
}
void AndroidInputHandler::_parse_all_touch(bool p_pressed, bool p_canceled, bool p_double_tap) {
if (touch.size()) {
// End all existing touches, if any.
for (int i = 0; i < touch.size(); i++) {
Ref<InputEventScreenTouch> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_pressed(p_pressed);
ev->set_canceled(p_canceled);
ev->set_position(touch[i].pos);
ev->set_double_tap(p_double_tap);
Input::get_singleton()->parse_input_event(ev);
}
}
}
void AndroidInputHandler::_release_all_touch() {
_parse_all_touch(false, false);
touch.clear();
}
void AndroidInputHandler::process_touch_event(int p_event, int p_pointer, const Vector<TouchPos> &p_points, bool p_double_tap) {
switch (p_event) {
case AMOTION_EVENT_ACTION_DOWN: { //gesture begin
// Release any remaining touches or mouse event
_release_mouse_event_info();
_release_all_touch();
touch.resize(p_points.size());
for (int i = 0; i < p_points.size(); i++) {
touch.write[i].id = p_points[i].id;
touch.write[i].pos = p_points[i].pos;
touch.write[i].pressure = p_points[i].pressure;
touch.write[i].tilt = p_points[i].tilt;
}
//send touch
_parse_all_touch(true, false, p_double_tap);
} break;
case AMOTION_EVENT_ACTION_MOVE: { //motion
if (touch.size() != p_points.size()) {
return;
}
for (int i = 0; i < touch.size(); i++) {
int idx = -1;
for (int j = 0; j < p_points.size(); j++) {
if (touch[i].id == p_points[j].id) {
idx = j;
break;
}
}
ERR_CONTINUE(idx == -1);
if (touch[i].pos == p_points[idx].pos) {
continue; // Don't move unnecessarily.
}
Ref<InputEventScreenDrag> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_position(p_points[idx].pos);
ev->set_relative(p_points[idx].pos - touch[i].pos);
ev->set_relative_s<|fim_middle|>if (p_physical_keycode == AKEYCODE_BACK && p_pressed) {
if (DisplayServerAndroid *dsa = Object::cast_to<DisplayServerAndroid>(DisplayServer::get_singleton())) {
dsa->send_window_event(DisplayServer::WINDOW_EVENT_GO_BACK_REQUEST, true);
}
}
|
v->set_shift_pressed(shift_mem);
}
if (p_keycode != Key::ALT) {
ev->set_alt_pressed(alt_mem);
}
if (p_keycode != Key::META) {
ev->set_meta_pressed(meta_mem);
}
if (p_keycode != Key::CTRL) {
ev->set_ctrl_pressed(control_mem);
}
}
void AndroidInputHandler::process_key_event(int p_physical_keycode, int p_unicode, int p_key_label, bool p_pressed, bool p_echo) {
static char32_t prev_wc = 0;
char32_t unicode = p_unicode;
if ((p_unicode & 0xfffffc00) == 0xd800) {
if (prev_wc != 0) {
ERR_PRINT("invalid utf16 surrogate input");
}
prev_wc = unicode;
return; // Skip surrogate.
} else if ((unicode & 0xfffffc00) == 0xdc00) {
if (prev_wc == 0) {
ERR_PRINT("invalid utf16 surrogate input");
return; // Skip invalid surrogate.
}
unicode = (prev_wc << 10UL) + unicode - ((0xd800 << 10UL) + 0xdc00 - 0x10000);
prev_wc = 0;
} else {
prev_wc = 0;
}
Ref<InputEventKey> ev;
ev.instantiate();
Key physical_keycode = godot_code_from_android_code(p_physical_keycode);
Key keycode;
if (unicode == '\b') { // 0x08
keycode = Key::BACKSPACE;
} else if (unicode == '\t') { // 0x09
keycode = Key::TAB;
} else if (unicode == '\n') { // 0x0A
keycode = Key::ENTER;
} else if (unicode == 0x1B) {
keycode = Key::ESCAPE;
} else if (unicode == 0x1F) {
keycode = Key::KEY_DELETE;
} else {
keycode = fix_keycode(unicode, physical_keycode);
}
switch (physical_keycode) {
case Key::SHIFT: {
shift_mem = p_pressed;
} break;
case Key::ALT: {
alt_mem = p_pressed;
} break;
case Key::CTRL: {
control_mem = p_pressed;
} break;
case Key::META: {
meta_mem = p_pressed;
} break;
default:
break;
}
ev->set_keycode(keycode);
ev->set_physical_keycode(physical_keycode);
ev->set_key_label(fix_key_label(p_key_label, keycode));
ev->set_unicode(fix_unicode(unicode));
ev->set_location(godot_location_from_android_code(p_physical_keycode));
ev->set_pressed(p_pressed);
ev->set_echo(p_echo);
_set_key_modifier_state(ev, keycode);
|
if (p_physical_keycode == AKEYCODE_BACK && p_pressed) {
if (DisplayServerAndroid *dsa = Object::cast_to<DisplayServerAndroid>(DisplayServer::get_singleton())) {
dsa->send_window_event(DisplayServer::WINDOW_EVENT_GO_BACK_REQUEST, true);
}
}
|
Input::get_singleton()->parse_input_event(ev);
}
void AndroidInputHandler::_cancel_all_touch() {
_parse_all_touch(false, true);
touch.clear();
}
void AndroidInputHandler::_parse_all_touch(bool p_pressed, bool p_canceled, bool p_double_tap) {
if (touch.size()) {
// End all existing touches, if any.
for (int i = 0; i < touch.size(); i++) {
Ref<InputEventScreenTouch> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_pressed(p_pressed);
ev->set_canceled(p_canceled);
ev->set_position(touch[i].pos);
ev->set_double_tap(p_double_tap);
Input::get_singleton()->parse_input_event(ev);
}
}
}
void AndroidInputHandler::_release_all_touch() {
_parse_all_touch(false, false);
touch.clear();
}
void AndroidInputHandler::process_touch_event(int p_event, int p_pointer, const Vector<TouchPos> &p_points, bool p_double_tap) {
switch (p_event) {
case AMOTION_EVENT_ACTION_DOWN: { //gesture begin
// Release any remaining touches or mouse event
_release_mouse_event_info();
_release_all_touch();
touch.resize(p_points.size());
for (int i = 0; i < p_points.size(); i++) {
touch.write[i].id = p_points[i].id;
touch.write[i].pos = p_points[i].pos;
touch.write[i].pressure = p_points[i].pressure;
touch.write[i].tilt = p_points[i].tilt;
}
//send touch
_parse_all_touch(true, false, p_double_tap);
} break;
case AMOTION_EVENT_ACTION_MOVE: { //motion
if (touch.size() != p_points.size()) {
return;
}
for (int i = 0; i < touch.size(); i++) {
int idx = -1;
for (int j = 0; j < p_points.size(); j++) {
if (touch[i].id == p_points[j].id) {
idx = j;
break;
}
}
ERR_CONTINUE(idx == -1);
if (touch[i].pos == p_points[idx].pos) {
continue; // Don't move unnecessarily.
}
Ref<InputEventScreenDrag> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_position(p_points[idx].pos);
ev->set_relative(p_points[idx].pos - touch[i].pos);
ev->set_relative_s
|
ast_based
|
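The Android key handler above reassembles UTF-16 surrogate pairs: a high surrogate (0xD800-0xDBFF) is remembered in prev_wc, and when the low surrogate (0xDC00-0xDFFF) arrives the pair is folded into one code point with (hi << 10) + lo - ((0xD800 << 10) + 0xDC00 - 0x10000). A standalone worked sketch of the same arithmetic (not taken from the engine) for U+1F600, whose surrogates are 0xD83D and 0xDE00:

#include <cassert>
#include <cstdint>

static char32_t combine_surrogates(char32_t hi, char32_t lo) {
    // Same formula as the handler above: subtract both surrogate bases and
    // add back the 0x10000 offset of the supplementary planes.
    return (hi << 10) + lo - ((0xD800 << 10) + 0xDC00 - 0x10000);
}

int main() {
    assert(combine_surrogates(0xD83D, 0xDE00) == 0x1F600); // U+1F600
    return 0;
}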
<|fim_prefix|>eturn *ptr;
}
static void print_row(struct ggml_tensor * probs, int i) {
for (int k = 0; k < probs->ne[0]; ++k) {
float p = get_f32_2d(probs, k, i);
LOG(" %f", p);
}
LOG("\n");
}
static void print_matrix(struct ggml_tensor * probs) {
assert(ggml_is_matrix(probs));
for (int i = 0; i < probs->ne[1]; ++i) {
for (int k = 0; k < probs->ne[0]; ++k) {
float p = get_f32_2d(probs, k, i);
LOG(" %.2f", p);
}
LOG("\n");
}
}
struct my_llama_file {
// use FILE * so we don't have to re-open the file to mmap
FILE * fp;
size_t size;
my_llama_file(const char * fname, const char * mode) {
fp = std::fopen(fname, mode);
if (fp == NULL) {
size = 0;
} else {
seek(0, SEEK_END);
size = tell();
seek(0, SEEK_SET);
}
}
size_t tell() const {
#ifdef _WIN32
__int64 ret = _ftelli64(fp);
#else
long ret = std::ftell(fp);
#endif
GGML_ASSERT(ret != -1); // this really shouldn't fail
return (size_t) ret;
}
void seek(size_t offset, int whence) {
#ifdef _WIN32
int ret = _fseeki64(fp, (__int64) offset, whence);
#else
int ret = std::fseek(fp, (long) offset, whence);
#endif
GGML_ASSERT(ret == 0); // same
}
void read_raw(void * ptr, size_t size) {
if (size == 0) {
return;
}
errno = 0;
std::size_t ret = std::fread(ptr, size, 1, fp);
if (ferror(fp)) {
die_fmt("fread failed: %s", strerror(errno));
}
if (ret != 1) {
die("unexpectedly reached end of file");
}
}
std::uint32_t read_u32() {
std::uint32_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::float_t read_f32() {
std::float_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::string read_string(std::uint32_t len) <|fim_suffix|>
}
~my_llama_file() {
if (fp) {
std::fclose(fp);
}
}
};
static bool is_ggml_file(const char * filename) {
my_llama_file file(filename, "rb");
if (file.size < 4) {
return false;
}
std::string magic = file.read_string(4);
return magic == GGUF_MAGIC;
}
static std::string llama_escape_whitespaces(const std::string & text) {
std::ostringstream out;
for (char c : text) {
if (c == ' ') out << "\xe2\x96\x81";
else out << c;
}
return out.str();
}
static void load_vocab(const char * filename, const Config * config, struct my_llama_vocab * vocab) {
if (is_ggml_file(filename)) {
LOG_INF("%s: Loading vocabulary from gguf file %s\n", __func__, filename);
struct ggml_context * ctx_data = NULL;
struct gguf_init_params params = {
/*.no_alloc = */ false,
/*.ctx = */ &ctx_data,
};
struct gguf_context * ctx = gguf_init_from_file(filename, params);
GGML_ASSERT(ctx != NULL);
const int model_idx = gguf_find_key(ctx, KV_TOKENIZER_MODEL);
GGML_ASSERT(model_idx >= 0);
std::string tokenizer_name = gguf_get_val_str(ctx, model_idx);
GGML_ASSERT(tokenizer_name == TOKENIZER_NAME);
const int token_idx = gguf_find_key(ctx, KV_TOKENIZER_LIST);
GGML_ASSERT(token_idx >= 0);
const int score_idx = gguf_find_key(ctx, KV_TOKENIZER_SCORES);
GGML_ASSERT(score_idx >= 0);
const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
const int toktype_idx = gguf_find_key(ctx, KV_TOKENIZER_TOKEN_TYPE);
GGML_ASSERT(toktype_idx >= 0);
const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
if (n_vocab != static_cast<uint32_t>(config->vocab_size)) {
die_fmt("vocab size mismatch: (gguf) %u != (llama2c) %d", n_vocab, c<|fim_middle|>{
std::vector<char> chars(len);
read_raw(chars.data(), len);
return std::string(chars.data(), len);
|
eturn *ptr;
}
static void print_row(struct ggml_tensor * probs, int i) {
for (int k = 0; k < probs->ne[0]; ++k) {
float p = get_f32_2d(probs, k, i);
LOG(" %f", p);
}
LOG("\n");
}
static void print_matrix(struct ggml_tensor * probs) {
assert(ggml_is_matrix(probs));
for (int i = 0; i < probs->ne[1]; ++i) {
for (int k = 0; k < probs->ne[0]; ++k) {
float p = get_f32_2d(probs, k, i);
LOG(" %.2f", p);
}
LOG("\n");
}
}
struct my_llama_file {
// use FILE * so we don't have to re-open the file to mmap
FILE * fp;
size_t size;
my_llama_file(const char * fname, const char * mode) {
fp = std::fopen(fname, mode);
if (fp == NULL) {
size = 0;
} else {
seek(0, SEEK_END);
size = tell();
seek(0, SEEK_SET);
}
}
size_t tell() const {
#ifdef _WIN32
__int64 ret = _ftelli64(fp);
#else
long ret = std::ftell(fp);
#endif
GGML_ASSERT(ret != -1); // this really shouldn't fail
return (size_t) ret;
}
void seek(size_t offset, int whence) {
#ifdef _WIN32
int ret = _fseeki64(fp, (__int64) offset, whence);
#else
int ret = std::fseek(fp, (long) offset, whence);
#endif
GGML_ASSERT(ret == 0); // same
}
void read_raw(void * ptr, size_t size) {
if (size == 0) {
return;
}
errno = 0;
std::size_t ret = std::fread(ptr, size, 1, fp);
if (ferror(fp)) {
die_fmt("fread failed: %s", strerror(errno));
}
if (ret != 1) {
die("unexpectedly reached end of file");
}
}
std::uint32_t read_u32() {
std::uint32_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::float_t read_f32() {
std::float_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::string read_string(std::uint32_t len)
|
{
std::vector<char> chars(len);
read_raw(chars.data(), len);
return std::string(chars.data(), len);
|
}
~my_llama_file() {
if (fp) {
std::fclose(fp);
}
}
};
static bool is_ggml_file(const char * filename) {
my_llama_file file(filename, "rb");
if (file.size < 4) {
return false;
}
std::string magic = file.read_string(4);
return magic == GGUF_MAGIC;
}
static std::string llama_escape_whitespaces(const std::string & text) {
std::ostringstream out;
for (char c : text) {
if (c == ' ') out << "\xe2\x96\x81";
else out << c;
}
return out.str();
}
static void load_vocab(const char * filename, const Config * config, struct my_llama_vocab * vocab) {
if (is_ggml_file(filename)) {
LOG_INF("%s: Loading vocabulary from gguf file %s\n", __func__, filename);
struct ggml_context * ctx_data = NULL;
struct gguf_init_params params = {
/*.no_alloc = */ false,
/*.ctx = */ &ctx_data,
};
struct gguf_context * ctx = gguf_init_from_file(filename, params);
GGML_ASSERT(ctx != NULL);
const int model_idx = gguf_find_key(ctx, KV_TOKENIZER_MODEL);
GGML_ASSERT(model_idx >= 0);
std::string tokenizer_name = gguf_get_val_str(ctx, model_idx);
GGML_ASSERT(tokenizer_name == TOKENIZER_NAME);
const int token_idx = gguf_find_key(ctx, KV_TOKENIZER_LIST);
GGML_ASSERT(token_idx >= 0);
const int score_idx = gguf_find_key(ctx, KV_TOKENIZER_SCORES);
GGML_ASSERT(score_idx >= 0);
const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
const int toktype_idx = gguf_find_key(ctx, KV_TOKENIZER_TOKEN_TYPE);
GGML_ASSERT(toktype_idx >= 0);
const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
if (n_vocab != static_cast<uint32_t>(config->vocab_size)) {
die_fmt("vocab size mismatch: (gguf) %u != (llama2c) %d", n_vocab, c
|
ast_based
|
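In the row above, is_ggml_file() reads the first four bytes and compares them against GGUF_MAGIC; a GGUF file begins with the ASCII magic "GGUF" followed by a little-endian uint32 version. A hedged standalone sketch of that sniffing with plain stdio instead of the my_llama_file helper, assuming the standard header layout:

// Minimal GGUF sniff; standalone sketch, not taken from the excerpt.
#include <cstdint>
#include <cstdio>
#include <cstring>

static bool looks_like_gguf(const char *path) {
    FILE *fp = std::fopen(path, "rb");
    if (!fp) {
        return false;
    }
    char magic[4];
    uint32_t version = 0;
    bool ok = std::fread(magic, 1, 4, fp) == 4 &&
              std::fread(&version, sizeof(version), 1, fp) == 1 &&
              std::memcmp(magic, "GGUF", 4) == 0;
    std::fclose(fp);
    if (ok) {
        std::printf("GGUF file, version %u\n", version);
    }
    return ok;
}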
<|fim_prefix|>KEY_SET_HANDLE_FREE);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesLinear")), TTR("Make Handles Linear"), MENU_KEY_SET_HANDLE_LINEAR);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesBalanced")), TTR("Make Handles Balanced"), MENU_KEY_SET_HANDLE_BALANCED);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesMirror")), TTR("Make Handles Mirrored"), MENU_KEY_SET_HANDLE_MIRRORED);
menu->add_separator();
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesBalanced")), TTR("Make Handles Balanced (Auto Tangent)"), MENU_KEY_SET_HANDLE_AUTO_BALANCED);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesMirror")), TTR("Make Handles Mirrored (Auto Tangent)"), MENU_KEY_SET_HANDLE_AUTO_MIRRORED);
}
if (menu->get_item_count()) {
menu->reset_size();
menu->set_position(popup_pos);
menu->popup();
}
}
}
}
if (mb.is_valid() && mb->is_pressed() && mb->get_button_index() == MouseButton::LEFT) {
Point2 pos = mb->get_position();
bool no_mod_key_pressed = !mb->is_alt_pressed() && !mb->is_shift_pressed() && !mb->is_command_or_control_pressed();
if (mb->is_double_click() && !moving_selection && no_mod_key_pressed) {
int x = pos.x - timeline->get_name_limit();
float ofs = x / timeline->get_zoom_scale() + timeline->get_value();
emit_signal(SNAME("timeline_changed"), ofs, false);
}
for (const KeyValue<int, Rect2> &E : subtracks) {
if (E.value.has_point(mb->get_position())) {
if (!locked_tracks.has(E.key) && !hidden_tracks.has(E.key)) {
set_animation_and_track(animation, E.key, read_only);
_clear_selection();
}
return;
}
}
for (const KeyValue<int, RBMap<int, Rect2>> &E : subtrack_icons) {
int track = E.key;
RBMap<int, Rect2> track_icons = E.value;
for (const KeyValue<int, Rect2> &I : track_icons) {
if (I.value.has_point(mb->get_position())) {
if (I.key == REMOVE_ICON) {
if (!read_only) {
<|fim_suffix|>
undo_redo->create_action("Remove Bezier Track", UndoRedo::MERGE_DISABLE, animation.ptr());
undo_redo->add_do_method(this, "_update_locked_tracks_after", track);
undo_redo->add_do_method(this, "_update_hidden_tracks_after", track);
undo_redo->add_do_method(animation.ptr(), "remove_track", track);
undo_redo->add_undo_method(animation.ptr(), "add_track", Animation::TrackType::TYPE_BEZIER, track);
undo_redo->add_undo_method(animation.ptr(), "track_set_path", track, animation->track_get_path(track));
for (int i = 0; i < animation->track_get_key_count(track); ++i) {
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
track,
animation->track_get_key_time(track, i),
animation->bezier_track_get_key_value(track, i),
animation->bezier_track_get_key_in_handle(track, i),
animation->bezier_track_get_key_out_handle(track, i),
animation->bezier_track_get_key_handle_mode(track, i));
}
undo_redo->commit_action();
selected_track = CLAMP(selected_track, 0, animation->get_track_count() - 1);
}
return;
} else if (I.key == LOCK_ICON) {
if (locked_tracks.has(track)) {
locked_tracks.erase(track);
} else {
locked_tracks.insert(track);
if (selected_track == track) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (!locked_tracks.has(i) && animation->track_get_type(i) == Animation::TrackType::TYPE_BEZIER) {
set_animation_and_track(animation, i, read_only);
break;
}
}
}
}
queue_redraw();
return;
} else if (I.key == VISIBILITY_ICON) {
if (hidden_tracks.has(track)) {
hidden_tracks.erase(track);
} else {
hidden_tracks.insert(track);
if (selected_track == track) {
for (int i = 0; i < animation->get_track_count(); +<|fim_middle|>EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
|
KEY_SET_HANDLE_FREE);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesLinear")), TTR("Make Handles Linear"), MENU_KEY_SET_HANDLE_LINEAR);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesBalanced")), TTR("Make Handles Balanced"), MENU_KEY_SET_HANDLE_BALANCED);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesMirror")), TTR("Make Handles Mirrored"), MENU_KEY_SET_HANDLE_MIRRORED);
menu->add_separator();
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesBalanced")), TTR("Make Handles Balanced (Auto Tangent)"), MENU_KEY_SET_HANDLE_AUTO_BALANCED);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesMirror")), TTR("Make Handles Mirrored (Auto Tangent)"), MENU_KEY_SET_HANDLE_AUTO_MIRRORED);
}
if (menu->get_item_count()) {
menu->reset_size();
menu->set_position(popup_pos);
menu->popup();
}
}
}
}
if (mb.is_valid() && mb->is_pressed() && mb->get_button_index() == MouseButton::LEFT) {
Point2 pos = mb->get_position();
bool no_mod_key_pressed = !mb->is_alt_pressed() && !mb->is_shift_pressed() && !mb->is_command_or_control_pressed();
if (mb->is_double_click() && !moving_selection && no_mod_key_pressed) {
int x = pos.x - timeline->get_name_limit();
float ofs = x / timeline->get_zoom_scale() + timeline->get_value();
emit_signal(SNAME("timeline_changed"), ofs, false);
}
for (const KeyValue<int, Rect2> &E : subtracks) {
if (E.value.has_point(mb->get_position())) {
if (!locked_tracks.has(E.key) && !hidden_tracks.has(E.key)) {
set_animation_and_track(animation, E.key, read_only);
_clear_selection();
}
return;
}
}
for (const KeyValue<int, RBMap<int, Rect2>> &E : subtrack_icons) {
int track = E.key;
RBMap<int, Rect2> track_icons = E.value;
for (const KeyValue<int, Rect2> &I : track_icons) {
if (I.value.has_point(mb->get_position())) {
if (I.key == REMOVE_ICON) {
if (!read_only) {
|
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
|
undo_redo->create_action("Remove Bezier Track", UndoRedo::MERGE_DISABLE, animation.ptr());
undo_redo->add_do_method(this, "_update_locked_tracks_after", track);
undo_redo->add_do_method(this, "_update_hidden_tracks_after", track);
undo_redo->add_do_method(animation.ptr(), "remove_track", track);
undo_redo->add_undo_method(animation.ptr(), "add_track", Animation::TrackType::TYPE_BEZIER, track);
undo_redo->add_undo_method(animation.ptr(), "track_set_path", track, animation->track_get_path(track));
for (int i = 0; i < animation->track_get_key_count(track); ++i) {
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
track,
animation->track_get_key_time(track, i),
animation->bezier_track_get_key_value(track, i),
animation->bezier_track_get_key_in_handle(track, i),
animation->bezier_track_get_key_out_handle(track, i),
animation->bezier_track_get_key_handle_mode(track, i));
}
undo_redo->commit_action();
selected_track = CLAMP(selected_track, 0, animation->get_track_count() - 1);
}
return;
} else if (I.key == LOCK_ICON) {
if (locked_tracks.has(track)) {
locked_tracks.erase(track);
} else {
locked_tracks.insert(track);
if (selected_track == track) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (!locked_tracks.has(i) && animation->track_get_type(i) == Animation::TrackType::TYPE_BEZIER) {
set_animation_and_track(animation, i, read_only);
break;
}
}
}
}
queue_redraw();
return;
} else if (I.key == VISIBILITY_ICON) {
if (hidden_tracks.has(track)) {
hidden_tracks.erase(track);
} else {
hidden_tracks.insert(track);
if (selected_track == track) {
for (int i = 0; i < animation->get_track_count(); +
|
ast_based
|
<|fim_prefix|> frame = 0;
pause();
emit_signal(SceneStringName(animation_finished));
return;
}
} else {
frame--;
}
_calc_frame_speed_scale();
frame_progress = 1.0;
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
double to_process = MIN(frame_progress / abs_speed, remaining);
frame_progress -= to_process * abs_speed;
remaining -= to_process;
}
i++;
if (i > fc) {
return; // Prevents freezing if to_process is consistently much smaller than remaining.
}
}
} break;
case NOTIFICATION_DRAW: {
if (frames.is_null() || !frames->has_animation(animation)) {
return;
}
Ref<Texture2D> texture = frames->get_frame_texture(animation, frame);
if (texture.is_null()) {
return;
}
RID ci = get_canvas_item();
Size2 s = texture->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (get_viewport() && get_viewport()->is_snap_2d_transforms_to_pixel_enabled()) {
ofs = (ofs + Point2(0.5, 0.5)).floor();
}
Rect2 dst_rect(ofs, s);
if (hflip) {
dst_rect.size.x = -dst_rect.size.x;
}
if (vflip) {
dst_rect.size.y = -dst_rect.size.y;
}
texture->draw_rect_region(ci, dst_rect, Rect2(Vector2(), texture->get_size()), Color(1, 1, 1), false);
} break;
}
}
void AnimatedSprite2D::set_sprite_frames(const Ref<SpriteFrames> &p_frames) {
if (frames == p_frames) {
return;
}
if (frames.is_valid()) {
frames->disconnect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
}
stop();
frames = p_frames;
if (frames.is_valid()) {
frames->connect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
List<StringName> al;
frames->get_animation_list(&al);
if (al.is_empty()) {
set_animation(StringName());
autoplay = String();
} else {
if (!frames->has_animation(animation)) {
set_animation(al.front()->get());<|fim_suffix|>
notify_property_list_changed();
queue_redraw();
update_configuration_warnings();
emit_signal("sprite_frames_changed");
}
Ref<SpriteFrames> AnimatedSprite2D::get_sprite_frames() const {
return frames;
}
void AnimatedSprite2D::set_frame(int p_frame) {
set_frame_and_progress(p_frame, std::signbit(get_playing_speed()) ? 1.0 : 0.0);
}
int AnimatedSprite2D::get_frame() const {
return frame;
}
void AnimatedSprite2D::set_frame_progress(real_t p_progress) {
frame_progress = p_progress;
}
real_t AnimatedSprite2D::get_frame_progress() const {
return frame_progress;
}
void AnimatedSprite2D::set_frame_and_progress(int p_frame, real_t p_progress) {
if (frames.is_null()) {
return;
}
bool has_animation = frames->has_animation(animation);
int end_frame = has_animation ? MAX(0, frames->get_frame_count(animation) - 1) : 0;
bool is_changed = frame != p_frame;
if (p_frame < 0) {
frame = 0;
} else if (has_animation && p_frame > end_frame) {
frame = end_frame;
} else {
frame = p_frame;
}
_calc_frame_speed_scale();
frame_progress = p_progress;
if (!is_changed) {
return; // No change, don't redraw.
}
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
void AnimatedSprite2D::set_speed_scale(float p_speed_scale) {
speed_scale = p_speed_scale;
}
float AnimatedSprite2D::get_speed_scale() const {
return speed_scale;
}
float AnimatedSprite2D::get_playing_speed() const {
if (!playing) {
return 0;
}
return speed_scale * custom_speed_scale;
}
void AnimatedSprite2D::set_centered(bool p_center) {
if (centered == p_center) {
return;
}
centered = p_center;
queue_redraw();
item_rect_changed();
}
bool AnimatedSprite2D::is_centered() const {
return centered;
}
void AnimatedSprite2D::set_offset(const Point2 &p_offset) {
if (offset == p_offset) {
return;
}
offset = p_offset;
queue_redraw();
item_rect_changed();
}
Point2 AnimatedSprite2D::get_offset() const {
return offset;
}
<|fim_middle|> }
if (!frames->has_animation(autoplay)) {
autoplay = String();
}
}
}
|
frame = 0;
pause();
emit_signal(SceneStringName(animation_finished));
return;
}
} else {
frame--;
}
_calc_frame_speed_scale();
frame_progress = 1.0;
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
double to_process = MIN(frame_progress / abs_speed, remaining);
frame_progress -= to_process * abs_speed;
remaining -= to_process;
}
i++;
if (i > fc) {
return; // Prevents freezing if to_process is consistently much smaller than remaining.
}
}
} break;
case NOTIFICATION_DRAW: {
if (frames.is_null() || !frames->has_animation(animation)) {
return;
}
Ref<Texture2D> texture = frames->get_frame_texture(animation, frame);
if (texture.is_null()) {
return;
}
RID ci = get_canvas_item();
Size2 s = texture->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (get_viewport() && get_viewport()->is_snap_2d_transforms_to_pixel_enabled()) {
ofs = (ofs + Point2(0.5, 0.5)).floor();
}
Rect2 dst_rect(ofs, s);
if (hflip) {
dst_rect.size.x = -dst_rect.size.x;
}
if (vflip) {
dst_rect.size.y = -dst_rect.size.y;
}
texture->draw_rect_region(ci, dst_rect, Rect2(Vector2(), texture->get_size()), Color(1, 1, 1), false);
} break;
}
}
void AnimatedSprite2D::set_sprite_frames(const Ref<SpriteFrames> &p_frames) {
if (frames == p_frames) {
return;
}
if (frames.is_valid()) {
frames->disconnect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
}
stop();
frames = p_frames;
if (frames.is_valid()) {
frames->connect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
List<StringName> al;
frames->get_animation_list(&al);
if (al.is_empty()) {
set_animation(StringName());
autoplay = String();
} else {
if (!frames->has_animation(animation)) {
set_animation(al.front()->get());
|
}
if (!frames->has_animation(autoplay)) {
autoplay = String();
}
}
}
|
notify_property_list_changed();
queue_redraw();
update_configuration_warnings();
emit_signal("sprite_frames_changed");
}
Ref<SpriteFrames> AnimatedSprite2D::get_sprite_frames() const {
return frames;
}
void AnimatedSprite2D::set_frame(int p_frame) {
set_frame_and_progress(p_frame, std::signbit(get_playing_speed()) ? 1.0 : 0.0);
}
int AnimatedSprite2D::get_frame() const {
return frame;
}
void AnimatedSprite2D::set_frame_progress(real_t p_progress) {
frame_progress = p_progress;
}
real_t AnimatedSprite2D::get_frame_progress() const {
return frame_progress;
}
void AnimatedSprite2D::set_frame_and_progress(int p_frame, real_t p_progress) {
if (frames.is_null()) {
return;
}
bool has_animation = frames->has_animation(animation);
int end_frame = has_animation ? MAX(0, frames->get_frame_count(animation) - 1) : 0;
bool is_changed = frame != p_frame;
if (p_frame < 0) {
frame = 0;
} else if (has_animation && p_frame > end_frame) {
frame = end_frame;
} else {
frame = p_frame;
}
_calc_frame_speed_scale();
frame_progress = p_progress;
if (!is_changed) {
return; // No change, don't redraw.
}
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
void AnimatedSprite2D::set_speed_scale(float p_speed_scale) {
speed_scale = p_speed_scale;
}
float AnimatedSprite2D::get_speed_scale() const {
return speed_scale;
}
float AnimatedSprite2D::get_playing_speed() const {
if (!playing) {
return 0;
}
return speed_scale * custom_speed_scale;
}
void AnimatedSprite2D::set_centered(bool p_center) {
if (centered == p_center) {
return;
}
centered = p_center;
queue_redraw();
item_rect_changed();
}
bool AnimatedSprite2D::is_centered() const {
return centered;
}
void AnimatedSprite2D::set_offset(const Point2 &p_offset) {
if (offset == p_offset) {
return;
}
offset = p_offset;
queue_redraw();
item_rect_changed();
}
Point2 AnimatedSprite2D::get_offset() const {
return offset;
}
|
random
|
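The snap-to-pixel branch in the AnimatedSprite2D row above rounds the draw offset to the nearest integer by adding 0.5 to each component and flooring. A one-file check of that round-half-up identity (standalone sketch, not engine code):

#include <cassert>
#include <cmath>

int main() {
    // floor(x + 0.5) is round-half-up: 3.4 -> 3, 3.5 -> 4, -1.2 -> -1.
    assert(std::floor(3.4 + 0.5) == 3.0);
    assert(std::floor(3.5 + 0.5) == 4.0);
    assert(std::floor(-1.2 + 0.5) == -1.0);
    return 0;
}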
<|fim_prefix|>#define TN_OUTPUT "output.weight"
#define TN_ATTN_NORM "blk.%d.attn_norm.weight"
#define TN_ATTN_Q "blk.%d.attn_q.weight"
#define TN_ATTN_K "blk.%d.attn_k.weight"
#define TN_ATTN_V "blk.%d.attn_v.weight"
#define TN_ATTN_OUTPUT "blk.%d.attn_output.weight"
#define TN_FFN_NORM "blk.%d.ffn_norm.weight"
#define TN_FFN_GATE "blk.%d.ffn_gate.weight"
#define TN_FFN_DOWN "blk.%d.ffn_down.weight"
#define TN_FFN_UP "blk.%d.ffn_up.weight"
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt'
#define LLAMA_FILE_VERSION_GGJT_V3 3
#define TOKENIZER_NAME "llama"
#define UNKNOWN_TOKEN_ID 0
#define BOS_TOKEN_ID 1
#define EOS_TOKEN_ID 2
//////////////////////////////////////// llama2.c model structs and functions to load models, alloc memory etc.
typedef struct {
int dim; // transformer dimension
int hidden_dim; // for ffn layers
int n_layers; // number of layers
int n_heads; // number of query heads
int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery)
int vocab_size; // vocabulary size, usually 256 (byte-level)
int seq_len; // max sequence length
} Config;
struct TransformerWeights {
// token embedding table
std::vector<float> token_embedding_table; // (vocab_size, dim)
// weights for rmsnorms
std::vector<float> rms_att_weight; // (layer, dim) rmsnorm weights
std::vector<float> rms_ffn_weight; // (layer, dim)
// weights for matmuls
std::vector<float> wq; // (layer, dim, dim)
std::vector<float> wk; // (layer, dim, dim)
std::vector<float> wv; // (layer, dim, dim)
std::vector<float> wo; // (layer, dim, dim)
// weights for ffn
std::vector<float> w1; // (layer, hidden_dim, dim)
std::vector<float> w2; // (layer, dim, hidden_dim)
std::vector<float> w3; // (layer, hidden_dim, dim)
    // final rmsnorm<|fim_suffix|>    // freq_cis for RoPE relative positional embeddings
// std::vector<float> freq_cis_real; // (seq_len, dim/2)
// std::vector<float> freq_cis_imag; // (seq_len, dim/2)
// (optional) classifier weights for the logits, on the last layer
std::vector<float> wcls;
};
static void alloc_weights(TransformerWeights * w, const Config * p, bool shared_weights) {
const int n_multiqueries = p->n_kv_heads <= 0 || p->n_kv_heads >= p->n_heads ? 1 : p->n_heads / p->n_kv_heads;
try {
w->token_embedding_table.resize(p->vocab_size * p->dim);
LOG_INF("%s: Allocating [%d] x [%d] = [%d] float space for w->token_embedding_table\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim);
w->rms_att_weight.resize(p->n_layers * p->dim);
LOG_INF("%s: Allocating [%d] x [%d] = [%d] float space for w->rms_att_weight\n",__func__,p->n_layers, p->dim, p->n_layers * p->dim);
w->rms_ffn_weight.resize(p->n_layers * p->dim);
LOG_INF("%s: Allocating [%d] x [%d] = [%d] float space for w->rms_ffn_weight\n",__func__,p->n_layers , p->dim, p->n_layers * p->dim);
w->wq.resize(p->n_layers * p->dim * p->dim);
LOG_INF("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->wq\n",__func__,p->n_layers, p->dim, p->dim, p->n_layers * p->dim * p->dim);
w->wk.resize(p->n_layers * p->dim * p->dim / n_multiqueries);
LOG_INF("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->wk\n",__func__,p->n_layers, p->dim, p->dim / n_multiqueries, p->n_layers * p->dim * p->dim / n_multiqueries);
w->wv.resize(p->n_layers * p->dim * p->dim / n_multiqueries);
LOG_INF("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->wv\n",__func__, p->n_layers, p->dim, p->dim / n_multiqueries, p->n_layers * p->dim * p->dim / n_multiqueries);
w->wo.resize(p->n_layers * p->dim * p->dim);<|fim_middle|> std::vector<float> rms_final_weight; // (dim,)
|
#define TN_OUTPUT "output.weight"
#define TN_ATTN_NORM "blk.%d.attn_norm.weight"
#define TN_ATTN_Q "blk.%d.attn_q.weight"
#define TN_ATTN_K "blk.%d.attn_k.weight"
#define TN_ATTN_V "blk.%d.attn_v.weight"
#define TN_ATTN_OUTPUT "blk.%d.attn_output.weight"
#define TN_FFN_NORM "blk.%d.ffn_norm.weight"
#define TN_FFN_GATE "blk.%d.ffn_gate.weight"
#define TN_FFN_DOWN "blk.%d.ffn_down.weight"
#define TN_FFN_UP "blk.%d.ffn_up.weight"
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt'
#define LLAMA_FILE_VERSION_GGJT_V3 3
#define TOKENIZER_NAME "llama"
#define UNKNOWN_TOKEN_ID 0
#define BOS_TOKEN_ID 1
#define EOS_TOKEN_ID 2
//////////////////////////////////////// llama2.c model structs and functions to load models, alloc memory etc.
typedef struct {
int dim; // transformer dimension
int hidden_dim; // for ffn layers
int n_layers; // number of layers
int n_heads; // number of query heads
int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery)
int vocab_size; // vocabulary size, usually 256 (byte-level)
int seq_len; // max sequence length
} Config;
struct TransformerWeights {
// token embedding table
std::vector<float> token_embedding_table; // (vocab_size, dim)
// weights for rmsnorms
std::vector<float> rms_att_weight; // (layer, dim) rmsnorm weights
std::vector<float> rms_ffn_weight; // (layer, dim)
// weights for matmuls
std::vector<float> wq; // (layer, dim, dim)
std::vector<float> wk; // (layer, dim, dim)
std::vector<float> wv; // (layer, dim, dim)
std::vector<float> wo; // (layer, dim, dim)
// weights for ffn
std::vector<float> w1; // (layer, hidden_dim, dim)
std::vector<float> w2; // (layer, dim, hidden_dim)
std::vector<float> w3; // (layer, hidden_dim, dim)
// final rmsnorm
|
std::vector<float> rms_final_weight; // (dim,)
|
    // freq_cis for RoPE relative positional embeddings
// std::vector<float> freq_cis_real; // (seq_len, dim/2)
// std::vector<float> freq_cis_imag; // (seq_len, dim/2)
// (optional) classifier weights for the logits, on the last layer
std::vector<float> wcls;
};
static void alloc_weights(TransformerWeights * w, const Config * p, bool shared_weights) {
const int n_multiqueries = p->n_kv_heads <= 0 || p->n_kv_heads >= p->n_heads ? 1 : p->n_heads / p->n_kv_heads;
try {
w->token_embedding_table.resize(p->vocab_size * p->dim);
LOG_INF("%s: Allocating [%d] x [%d] = [%d] float space for w->token_embedding_table\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim);
w->rms_att_weight.resize(p->n_layers * p->dim);
LOG_INF("%s: Allocating [%d] x [%d] = [%d] float space for w->rms_att_weight\n",__func__,p->n_layers, p->dim, p->n_layers * p->dim);
w->rms_ffn_weight.resize(p->n_layers * p->dim);
LOG_INF("%s: Allocating [%d] x [%d] = [%d] float space for w->rms_ffn_weight\n",__func__,p->n_layers , p->dim, p->n_layers * p->dim);
w->wq.resize(p->n_layers * p->dim * p->dim);
LOG_INF("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->wq\n",__func__,p->n_layers, p->dim, p->dim, p->n_layers * p->dim * p->dim);
w->wk.resize(p->n_layers * p->dim * p->dim / n_multiqueries);
LOG_INF("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->wk\n",__func__,p->n_layers, p->dim, p->dim / n_multiqueries, p->n_layers * p->dim * p->dim / n_multiqueries);
w->wv.resize(p->n_layers * p->dim * p->dim / n_multiqueries);
LOG_INF("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->wv\n",__func__, p->n_layers, p->dim, p->dim / n_multiqueries, p->n_layers * p->dim * p->dim / n_multiqueries);
w->wo.resize(p->n_layers * p->dim * p->dim);
|
random
|
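The alloc_weights logic above sizes the attention tensors from the Config fields and shrinks wk/wv when there are fewer key/value heads than query heads. A minimal sketch of the same arithmetic, assuming the Config struct shown above; the Sizes struct and estimate_sizes name are illustrative only, not part of the converter:

    #include <cstddef>

    struct Sizes {
        size_t tok_embd, rms_att, wq, wk;
    };

    static Sizes estimate_sizes(const Config & p) {
        // Same multiquery rule as alloc_weights.
        const int n_mq = (p.n_kv_heads <= 0 || p.n_kv_heads >= p.n_heads) ? 1 : p.n_heads / p.n_kv_heads;
        Sizes s;
        s.tok_embd = (size_t) p.vocab_size * p.dim;               // token_embedding_table
        s.rms_att  = (size_t) p.n_layers  * p.dim;                // rms_att_weight
        s.wq       = (size_t) p.n_layers  * p.dim * p.dim;        // full query projection
        s.wk       = (size_t) p.n_layers  * p.dim * p.dim / n_mq; // key projection shrinks under multiquery
        return s;
    }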
<|fim_prefix|> CAROTENE_NS::Margin(src_roi_x, src_full_width - width - src_roi_x, src_roi_y, src_full_height - height - src_roi_y)), \
CV_HAL_ERROR_OK : \
CV_HAL_ERROR_NOT_IMPLEMENTED \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
#undef cv_hal_morphInit
#define cv_hal_morphInit TEGRA_MORPHINIT
#undef cv_hal_morph
#define cv_hal_morph TEGRA_MORPHIMPL
#undef cv_hal_morphFree
#define cv_hal_morphFree TEGRA_MORPHFREE
#define TEGRA_RESIZE(src_type, src_data, src_step, src_width, src_height, dst_data, dst_step, dst_width, dst_height, inv_scale_x, inv_scale_y, interpolation) \
( \
interpolation == CV_HAL_INTER_LINEAR ? \
CV_MAT_DEPTH(src_type) == CV_8U && CAROTENE_NS::isResizeLinearOpenCVSupported(CAROTENE_NS::Size2D(src_width, src_height), CAROTENE_NS::Size2D(dst_width, dst_height), ((src_type >> CV_CN_SHIFT) + 1)) && \
inv_scale_x > 0 && inv_scale_y > 0 && \
(dst_width - 0.5)/inv_scale_x - 0.5 < src_width && (dst_height - 0.5)/inv_scale_y - 0.5 < src_height && \
(dst_width + 0.5)/inv_scale_x + 0.5 >= src_width && (dst_height + 0.5)/inv_scale_y + 0.5 >= src_height && \
std::abs(dst_width / inv_scale_x - src_width) < 0.1 && std::abs(dst_height / inv_scale_y - src_height) < 0.1 ? \
CAROTENE_NS::resizeLinearOpenCV(CAROTENE_NS::Size2D(src_width, src_height), CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, dst_data, dst_step, 1.0/inv_scale_x, 1.0/inv_scale_y, ((src_type >> CV_CN_SHIFT) + 1)), \
CV_HAL_ERROR_OK : CV_HAL_ERROR_NOT_IMPLEMENTED : \
interpolation == CV_HAL_INTER_AREA ? \
CV_MAT_DEPTH(src_type) == CV_8U && CAROTENE_NS::isResizeAreaSupported(1.0/inv_scale_x, 1.0/inv_scale_y, ((src_type >> CV_CN_SHIFT) + 1)) && \
std::abs(dst_width / inv_scale_x - src_width) < 0.1 && std::abs(dst_height / inv_scale_y - src_height) < 0.1 ? \<|fim_suffix|> src_data, src_step, dst_data, dst_step, 1.0/inv_scale_x, 1.0/inv_scale_y, ((src_type >> CV_CN_SHIFT) + 1)), \
CV_HAL_ERROR_OK : CV_HAL_ERROR_NOT_IMPLEMENTED : \
/*nearest neighbour interpolation disabled due to rounding accuracy issues*/ \
/*interpolation == CV_HAL_INTER_NEAREST ? \
(src_type == CV_8UC1 || src_type == CV_8SC1) && CAROTENE_NS::isResizeNearestNeighborSupported(CAROTENE_NS::Size2D(src_width, src_height), 1) ? \
CAROTENE_NS::resizeNearestNeighbor(CAROTENE_NS::Size2D(src_width, src_height), CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, dst_data, dst_step, 1.0/inv_scale_x, 1.0/inv_scale_y, 1), \
CV_HAL_ERROR_OK : \
(src_type == CV_8UC3 || src_type == CV_8SC3) && CAROTENE_NS::isResizeNearestNeighborSupported(CAROTENE_NS::Size2D(src_width, src_height), 3) ? \
CAROTENE_NS::resizeNearestNeighbor(CAROTENE_NS::Size2D(src_width, src_height), CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, dst_data, dst_step, 1.0/inv_scale_x, 1.0/inv_scale_y, 3), \
CV_HAL_ERROR_OK : \
(src_type == CV_8UC4 || src_type == CV_8SC4 || src_type == CV_16UC2 || src_type == CV_16SC2 || src_type == CV_32SC1) && \
CAROTENE_NS::isResizeNearestNeighborSupported(CAROTENE_NS::Size2D(src_width, src_height), 4) ? \
CAROTENE_NS::resizeNearestNeighbor(CAROTENE_NS::Size2D(src_width, src_height), CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, dst_data, dst_step, 1.0/inv_scale_x, 1.0/inv_scale_y, 4), \
CV_HAL_ERROR_OK : CV_HAL_ERROR_NOT_IMPLEMENTED :*/ \
CV_HAL_ERROR_NOT_IMPLEMENTED \
)
<|fim_middle|> CAROTENE_NS::resizeAreaOpenCV(CAROTENE_NS::Size2D(src_width, src_height), CAROTENE_NS::Size2D(dst_width, dst_height), \
|
CAROTENE_NS::Margin(src_roi_x, src_full_width - width - src_roi_x, src_roi_y, src_full_height - height - src_roi_y)), \
CV_HAL_ERROR_OK : \
CV_HAL_ERROR_NOT_IMPLEMENTED \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
#undef cv_hal_morphInit
#define cv_hal_morphInit TEGRA_MORPHINIT
#undef cv_hal_morph
#define cv_hal_morph TEGRA_MORPHIMPL
#undef cv_hal_morphFree
#define cv_hal_morphFree TEGRA_MORPHFREE
#define TEGRA_RESIZE(src_type, src_data, src_step, src_width, src_height, dst_data, dst_step, dst_width, dst_height, inv_scale_x, inv_scale_y, interpolation) \
( \
interpolation == CV_HAL_INTER_LINEAR ? \
CV_MAT_DEPTH(src_type) == CV_8U && CAROTENE_NS::isResizeLinearOpenCVSupported(CAROTENE_NS::Size2D(src_width, src_height), CAROTENE_NS::Size2D(dst_width, dst_height), ((src_type >> CV_CN_SHIFT) + 1)) && \
inv_scale_x > 0 && inv_scale_y > 0 && \
(dst_width - 0.5)/inv_scale_x - 0.5 < src_width && (dst_height - 0.5)/inv_scale_y - 0.5 < src_height && \
(dst_width + 0.5)/inv_scale_x + 0.5 >= src_width && (dst_height + 0.5)/inv_scale_y + 0.5 >= src_height && \
std::abs(dst_width / inv_scale_x - src_width) < 0.1 && std::abs(dst_height / inv_scale_y - src_height) < 0.1 ? \
CAROTENE_NS::resizeLinearOpenCV(CAROTENE_NS::Size2D(src_width, src_height), CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, dst_data, dst_step, 1.0/inv_scale_x, 1.0/inv_scale_y, ((src_type >> CV_CN_SHIFT) + 1)), \
CV_HAL_ERROR_OK : CV_HAL_ERROR_NOT_IMPLEMENTED : \
interpolation == CV_HAL_INTER_AREA ? \
CV_MAT_DEPTH(src_type) == CV_8U && CAROTENE_NS::isResizeAreaSupported(1.0/inv_scale_x, 1.0/inv_scale_y, ((src_type >> CV_CN_SHIFT) + 1)) && \
std::abs(dst_width / inv_scale_x - src_width) < 0.1 && std::abs(dst_height / inv_scale_y - src_height) < 0.1 ? \
|
CAROTENE_NS::resizeAreaOpenCV(CAROTENE_NS::Size2D(src_width, src_height), CAROTENE_NS::Size2D(dst_width, dst_height), \
|
src_data, src_step, dst_data, dst_step, 1.0/inv_scale_x, 1.0/inv_scale_y, ((src_type >> CV_CN_SHIFT) + 1)), \
CV_HAL_ERROR_OK : CV_HAL_ERROR_NOT_IMPLEMENTED : \
/*nearest neighbour interpolation disabled due to rounding accuracy issues*/ \
/*interpolation == CV_HAL_INTER_NEAREST ? \
(src_type == CV_8UC1 || src_type == CV_8SC1) && CAROTENE_NS::isResizeNearestNeighborSupported(CAROTENE_NS::Size2D(src_width, src_height), 1) ? \
CAROTENE_NS::resizeNearestNeighbor(CAROTENE_NS::Size2D(src_width, src_height), CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, dst_data, dst_step, 1.0/inv_scale_x, 1.0/inv_scale_y, 1), \
CV_HAL_ERROR_OK : \
(src_type == CV_8UC3 || src_type == CV_8SC3) && CAROTENE_NS::isResizeNearestNeighborSupported(CAROTENE_NS::Size2D(src_width, src_height), 3) ? \
CAROTENE_NS::resizeNearestNeighbor(CAROTENE_NS::Size2D(src_width, src_height), CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, dst_data, dst_step, 1.0/inv_scale_x, 1.0/inv_scale_y, 3), \
CV_HAL_ERROR_OK : \
(src_type == CV_8UC4 || src_type == CV_8SC4 || src_type == CV_16UC2 || src_type == CV_16SC2 || src_type == CV_32SC1) && \
CAROTENE_NS::isResizeNearestNeighborSupported(CAROTENE_NS::Size2D(src_width, src_height), 4) ? \
CAROTENE_NS::resizeNearestNeighbor(CAROTENE_NS::Size2D(src_width, src_height), CAROTENE_NS::Size2D(dst_width, dst_height), \
src_data, src_step, dst_data, dst_step, 1.0/inv_scale_x, 1.0/inv_scale_y, 4), \
CV_HAL_ERROR_OK : CV_HAL_ERROR_NOT_IMPLEMENTED :*/ \
CV_HAL_ERROR_NOT_IMPLEMENTED \
)
|
random
|
<|fim_prefix|> if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
moving_handle = 0;
queue_redraw();
}
}
}
bool AnimationBezierTrackEdit::_try_select_at_ui_pos(const Point2 &p_pos, bool p_aggregate, bool p_deselectable) {
for (int i = 0; i < edit_points.size(); i++) {
// Path 2D editing in the 3D and 2D editors works the same way. (?)
if (edit_points[i].point_rect.has_point(p_pos)) {
IntPair pair = IntPair(edit_points[i].track, edit_points[i].key);
if (p_aggregate) {
// Add to selection.
if (selection.has(pair)) {
if (p_deselectable) {
selection.erase(pair);
emit_signal(SNAME("deselect_key"), edit_points[i].key, edit_points[i].track);
}
} else {
_select_at_anim(animation, edit_points[i].track, animation->track_get_key_time(edit_points[i].track, edit_points[i].key), false);
}
queue_redraw();
select_single_attempt = IntPair(-1, -1);
} else {
if (p_deselectable) {
moving_selection_attempt = true;
moving_selection_from_key = pair.second;
moving_selection_from_track = pair.first;
moving_selection_mouse_begin = p_pos;
moving_selection_offset = Vector2();
moving_handle_track = pair.first;
moving_handle_left = animation->bezier_track_get_key_in_handle(pair.first, pair.second);
moving_handle_right = animation->bezier_track_get_key_out_handle(pair.first, pair.second);
if (selection.has(pair)) {
moving_selection = false;
} else {
moving_selection = true;
}
select_single_attempt = pair;
}
set_animation_and_track(animation, pair.first, read_only);
if (!selection.has(pair)) {
selection.clear();
_select_at_anim(animation, edit_points[i].track, animation->track_get_key_time(edit_points[i].track, edit_points[i].key), true);
}
}<|fim_suffix|> if (mm->get_position().x > timeline->get_name_limit()) {
timeline_v_scroll += p_scroll_vec.y * timeline_v_zoom;
timeline_v_scroll = CLAMP(timeline_v_scroll, -100000, 100000);
timeline->set_value(timeline->get_value() - p_scroll_vec.x / timeline->get_zoom_scale());
} else {
track_v_scroll += p_scroll_vec.y;
if (track_v_scroll < -track_v_scroll_max) {
track_v_scroll = -track_v_scroll_max;
} else if (track_v_scroll > 0) {
track_v_scroll = 0;
}
}
queue_redraw();
}
}
void AnimationBezierTrackEdit::_zoom_callback(float p_zoom_factor, Vector2 p_origin, Ref<InputEvent> p_event) {
const float v_zoom_orig = timeline_v_zoom;
Ref<InputEventWithModifiers> iewm = p_event;
if (iewm.is_valid() && iewm->is_alt_pressed()) {
// Alternate zoom (doesn't affect timeline).
timeline_v_zoom = CLAMP(timeline_v_zoom / p_zoom_factor, 0.000001, 100000);
} else {
float zoom_factor = p_zoom_factor > 1.0 ? AnimationTimelineEdit::SCROLL_ZOOM_FACTOR_IN : AnimationTimelineEdit::SCROLL_ZOOM_FACTOR_OUT;
timeline->_zoom_callback(zoom_factor, p_origin, p_event);
}
timeline_v_scroll = timeline_v_scroll + (p_origin.y - get_size().y / 2.0) * (timeline_v_zoom - v_zoom_orig);
queue_redraw();
}
float AnimationBezierTrackEdit::get_bezier_key_value(Array p_bezier_key_array) {
return p_bezier_key_array[0];
}
void AnimationBezierTrackEdit::_menu_selected(int p_index) {
int limit = timeline->get_name_limit();
real_t time = ((menu_insert_key.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
switch (p_index) {
case MENU_KEY_INSERT: {
if (animation->get_track_count() > 0) {
if (editor->snap_keys->is_pressed() && editor->step->get_value() != 0) {
time = editor->snap_time(time);
}
while (animation->track_find_key(selected_track, time, Animation::FIND_MODE_APPROX) != -1) {
time += 0.001;
}
float h = (get_size().height / 2.0 - menu_insert_key.y) * timeline_v_zoom + timeline_v_scroll;<|fim_middle|> return true;
}
}
return false;
}
void AnimationBezierTrackEdit::_pan_callback(Vector2 p_scroll_vec, Ref<InputEvent> p_event) {
Ref<InputEventMouseMotion> mm = p_event;
if (mm.is_valid()) {
|
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
moving_handle = 0;
queue_redraw();
}
}
}
bool AnimationBezierTrackEdit::_try_select_at_ui_pos(const Point2 &p_pos, bool p_aggregate, bool p_deselectable) {
for (int i = 0; i < edit_points.size(); i++) {
// Path 2D editing in the 3D and 2D editors works the same way. (?)
if (edit_points[i].point_rect.has_point(p_pos)) {
IntPair pair = IntPair(edit_points[i].track, edit_points[i].key);
if (p_aggregate) {
// Add to selection.
if (selection.has(pair)) {
if (p_deselectable) {
selection.erase(pair);
emit_signal(SNAME("deselect_key"), edit_points[i].key, edit_points[i].track);
}
} else {
_select_at_anim(animation, edit_points[i].track, animation->track_get_key_time(edit_points[i].track, edit_points[i].key), false);
}
queue_redraw();
select_single_attempt = IntPair(-1, -1);
} else {
if (p_deselectable) {
moving_selection_attempt = true;
moving_selection_from_key = pair.second;
moving_selection_from_track = pair.first;
moving_selection_mouse_begin = p_pos;
moving_selection_offset = Vector2();
moving_handle_track = pair.first;
moving_handle_left = animation->bezier_track_get_key_in_handle(pair.first, pair.second);
moving_handle_right = animation->bezier_track_get_key_out_handle(pair.first, pair.second);
if (selection.has(pair)) {
moving_selection = false;
} else {
moving_selection = true;
}
select_single_attempt = pair;
}
set_animation_and_track(animation, pair.first, read_only);
if (!selection.has(pair)) {
selection.clear();
_select_at_anim(animation, edit_points[i].track, animation->track_get_key_time(edit_points[i].track, edit_points[i].key), true);
}
}
|
return true;
}
}
return false;
}
void AnimationBezierTrackEdit::_pan_callback(Vector2 p_scroll_vec, Ref<InputEvent> p_event) {
Ref<InputEventMouseMotion> mm = p_event;
if (mm.is_valid()) {
|
if (mm->get_position().x > timeline->get_name_limit()) {
timeline_v_scroll += p_scroll_vec.y * timeline_v_zoom;
timeline_v_scroll = CLAMP(timeline_v_scroll, -100000, 100000);
timeline->set_value(timeline->get_value() - p_scroll_vec.x / timeline->get_zoom_scale());
} else {
track_v_scroll += p_scroll_vec.y;
if (track_v_scroll < -track_v_scroll_max) {
track_v_scroll = -track_v_scroll_max;
} else if (track_v_scroll > 0) {
track_v_scroll = 0;
}
}
queue_redraw();
}
}
void AnimationBezierTrackEdit::_zoom_callback(float p_zoom_factor, Vector2 p_origin, Ref<InputEvent> p_event) {
const float v_zoom_orig = timeline_v_zoom;
Ref<InputEventWithModifiers> iewm = p_event;
if (iewm.is_valid() && iewm->is_alt_pressed()) {
// Alternate zoom (doesn't affect timeline).
timeline_v_zoom = CLAMP(timeline_v_zoom / p_zoom_factor, 0.000001, 100000);
} else {
float zoom_factor = p_zoom_factor > 1.0 ? AnimationTimelineEdit::SCROLL_ZOOM_FACTOR_IN : AnimationTimelineEdit::SCROLL_ZOOM_FACTOR_OUT;
timeline->_zoom_callback(zoom_factor, p_origin, p_event);
}
timeline_v_scroll = timeline_v_scroll + (p_origin.y - get_size().y / 2.0) * (timeline_v_zoom - v_zoom_orig);
queue_redraw();
}
float AnimationBezierTrackEdit::get_bezier_key_value(Array p_bezier_key_array) {
return p_bezier_key_array[0];
}
void AnimationBezierTrackEdit::_menu_selected(int p_index) {
int limit = timeline->get_name_limit();
real_t time = ((menu_insert_key.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
switch (p_index) {
case MENU_KEY_INSERT: {
if (animation->get_track_count() > 0) {
if (editor->snap_keys->is_pressed() && editor->step->get_value() != 0) {
time = editor->snap_time(time);
}
while (animation->track_find_key(selected_track, time, Animation::FIND_MODE_APPROX) != -1) {
time += 0.001;
}
float h = (get_size().height / 2.0 - menu_insert_key.y) * timeline_v_zoom + timeline_v_scroll;
|
random
|
<|fim_prefix|> gguf_set_val_u32(ctx, KV_ROPE_DIMENSION_COUNT, model->hparams.n_rot);
gguf_set_val_f32(ctx, KV_ATTENTION_LAYERNORM_RMS_EPS, 1e-5f);
// write tensors
ggml_set_name(model->tok_embeddings, TN_TOKEN_EMBD);
gguf_add_tensor(ctx, model->tok_embeddings);
ggml_set_name(model->norm, TN_OUTPUT_NORM);
gguf_add_tensor(ctx, model->norm);
ggml_set_name(model->output, TN_OUTPUT);
gguf_add_tensor(ctx, model->output);
for (uint32_t i = 0; i < model->hparams.n_layer; ++i) {
auto & layer = model->layers[i];
ggml_format_name(layer.wq, TN_ATTN_Q, i);
gguf_add_tensor(ctx, layer.wq);
ggml_format_name(layer.wk, TN_ATTN_K, i);
gguf_add_tensor(ctx, layer.wk);
ggml_format_name(layer.wv, TN_ATTN_V, i);
gguf_add_tensor(ctx, layer.wv);
ggml_format_name(layer.wo, TN_ATTN_OUTPUT, i);
gguf_add_tensor(ctx, layer.wo);
ggml_format_name(layer.attention_norm, TN_ATTN_NORM, i);
gguf_add_tensor(ctx, layer.attention_norm);
ggml_format_name(layer.w1, TN_FFN_GATE, i);
gguf_add_tensor(ctx, layer.w1);
ggml_format_name(layer.w2, TN_FFN_DOWN, i);
gguf_add_tensor(ctx, layer.w2);
ggml_format_name(layer.w3, TN_FFN_UP, i);
gguf_add_tensor(ctx, layer.w3);
ggml_format_name(layer.ffn_norm, TN_FFN_NORM, i);
gguf_add_tensor(ctx, layer.ffn_norm);
}
gguf_write_to_file(ctx, filename, false);
gguf_free(ctx);
}
static struct train_params get_default_train_params() {
struct train_params params;
params.fn_vocab_model = "models/7B/ggml-model-f16.gguf";
params.fn_llama2c_output_model = "ak_llama_model.bin";
params.fn_train_data = "shakespeare.txt";
params.fn_checkpoint_in = "checkpoint.bin";
params.fn_checkpoint_out = "checkpoint.bin";
params.fn_model_out = "ggml-checkpoint-f32.bin";
params.seed = -1;
params.n_ctx = 128;
<|fim_suffix|>
params.n_mult = 256;
params.n_head = 8;
params.n_layer = 16;
params.n_rotmax = 64;
params.n_threads = 6;
params.n_batch = 8;
params.n_examples = 8;
params.n_predict = 1024;
params.print_info_interval = 1;
params.print_details_interval = 2;
params.samples_start_after_nl = false;
params.use_adam = true;
params.use_flash = false;
params.use_scratch = true;
// only adam
params.warmup = 100;
params.cos_decay_steps = 1000;
params.cos_decay_restart = 1.1f;
params.cos_decay_alpha = 0.0f;
params.lbfgs_n_iter = 16;
params.adam_n_iter = 16;
params.adam_alpha = 1e-3f;
params.adam_decay = 1e-3f;
params.mem_model_gb = 2;
params.mem_compute_gb = 24;
params.mem_compute0_gb = 8;
params.mem_compute1_gb = 2;
return params;
}
static void print_usage(int /*argc*/, char ** argv, const struct train_params * params) {
fprintf(stderr, "usage: %s [options]\n", argv[0]);
fprintf(stderr, "\n");
fprintf(stderr, "options:\n");
fprintf(stderr, " -h, --help show this help message and exit\n");
fprintf(stderr, " --copy-vocab-from-model FNAME path of gguf llama model or llama2.c vocabulary from which to copy vocab (default '%s')\n", params->fn_vocab_model);
fprintf(stderr, " --llama2c-model FNAME [REQUIRED] model path from which to load Karpathy's llama2.c model\n");
fprintf(stderr, " --llama2c-output-model FNAME model path to save the converted llama2.c model (default %s')\n", params->fn_llama2c_output_model);
fprintf(stderr, "\n");
}
static bool params_parse(int argc, char ** argv, struct train_params * params) {
bool invalid_param = false;
bool reqd_param_found = false;
std::string arg;
struct train_params default_params = get_default_train_params();
const std::st<|fim_middle|>params.n_embd = 256;
|
gguf_set_val_u32(ctx, KV_ROPE_DIMENSION_COUNT, model->hparams.n_rot);
gguf_set_val_f32(ctx, KV_ATTENTION_LAYERNORM_RMS_EPS, 1e-5f);
// write tensors
ggml_set_name(model->tok_embeddings, TN_TOKEN_EMBD);
gguf_add_tensor(ctx, model->tok_embeddings);
ggml_set_name(model->norm, TN_OUTPUT_NORM);
gguf_add_tensor(ctx, model->norm);
ggml_set_name(model->output, TN_OUTPUT);
gguf_add_tensor(ctx, model->output);
for (uint32_t i = 0; i < model->hparams.n_layer; ++i) {
auto & layer = model->layers[i];
ggml_format_name(layer.wq, TN_ATTN_Q, i);
gguf_add_tensor(ctx, layer.wq);
ggml_format_name(layer.wk, TN_ATTN_K, i);
gguf_add_tensor(ctx, layer.wk);
ggml_format_name(layer.wv, TN_ATTN_V, i);
gguf_add_tensor(ctx, layer.wv);
ggml_format_name(layer.wo, TN_ATTN_OUTPUT, i);
gguf_add_tensor(ctx, layer.wo);
ggml_format_name(layer.attention_norm, TN_ATTN_NORM, i);
gguf_add_tensor(ctx, layer.attention_norm);
ggml_format_name(layer.w1, TN_FFN_GATE, i);
gguf_add_tensor(ctx, layer.w1);
ggml_format_name(layer.w2, TN_FFN_DOWN, i);
gguf_add_tensor(ctx, layer.w2);
ggml_format_name(layer.w3, TN_FFN_UP, i);
gguf_add_tensor(ctx, layer.w3);
ggml_format_name(layer.ffn_norm, TN_FFN_NORM, i);
gguf_add_tensor(ctx, layer.ffn_norm);
}
gguf_write_to_file(ctx, filename, false);
gguf_free(ctx);
}
static struct train_params get_default_train_params() {
struct train_params params;
params.fn_vocab_model = "models/7B/ggml-model-f16.gguf";
params.fn_llama2c_output_model = "ak_llama_model.bin";
params.fn_train_data = "shakespeare.txt";
params.fn_checkpoint_in = "checkpoint.bin";
params.fn_checkpoint_out = "checkpoint.bin";
params.fn_model_out = "ggml-checkpoint-f32.bin";
params.seed = -1;
params.n_ctx = 128;
|
params.n_embd = 256;
|
params.n_mult = 256;
params.n_head = 8;
params.n_layer = 16;
params.n_rotmax = 64;
params.n_threads = 6;
params.n_batch = 8;
params.n_examples = 8;
params.n_predict = 1024;
params.print_info_interval = 1;
params.print_details_interval = 2;
params.samples_start_after_nl = false;
params.use_adam = true;
params.use_flash = false;
params.use_scratch = true;
// only adam
params.warmup = 100;
params.cos_decay_steps = 1000;
params.cos_decay_restart = 1.1f;
params.cos_decay_alpha = 0.0f;
params.lbfgs_n_iter = 16;
params.adam_n_iter = 16;
params.adam_alpha = 1e-3f;
params.adam_decay = 1e-3f;
params.mem_model_gb = 2;
params.mem_compute_gb = 24;
params.mem_compute0_gb = 8;
params.mem_compute1_gb = 2;
return params;
}
static void print_usage(int /*argc*/, char ** argv, const struct train_params * params) {
fprintf(stderr, "usage: %s [options]\n", argv[0]);
fprintf(stderr, "\n");
fprintf(stderr, "options:\n");
fprintf(stderr, " -h, --help show this help message and exit\n");
fprintf(stderr, " --copy-vocab-from-model FNAME path of gguf llama model or llama2.c vocabulary from which to copy vocab (default '%s')\n", params->fn_vocab_model);
fprintf(stderr, " --llama2c-model FNAME [REQUIRED] model path from which to load Karpathy's llama2.c model\n");
fprintf(stderr, " --llama2c-output-model FNAME model path to save the converted llama2.c model (default %s')\n", params->fn_llama2c_output_model);
fprintf(stderr, "\n");
}
static bool params_parse(int argc, char ** argv, struct train_params * params) {
bool invalid_param = false;
bool reqd_param_found = false;
std::string arg;
struct train_params default_params = get_default_train_params();
const std::st
|
ast_based
|
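The export loop above names each per-layer tensor by expanding the printf-style TN_* patterns through ggml_format_name. A small sketch of the same expansion, assuming a fixed-size buffer; the helper name is not part of the converter:

    #include <cstdio>
    #include <string>

    static std::string layer_tensor_name(const char * pattern, int layer) {
        char buf[128];
        std::snprintf(buf, sizeof(buf), pattern, layer); // "blk.%d.attn_q.weight" -> "blk.3.attn_q.weight"
        return std::string(buf);
    }

    // layer_tensor_name(TN_ATTN_Q, 3) yields "blk.3.attn_q.weight", matching what
    // ggml_format_name(layer.wq, TN_ATTN_Q, i) assigns before gguf_add_tensor.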
<|fim_prefix|> ggml_context * ctx = ctx_for_buft(buft);
if (!ctx) {
LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
return false;
}
ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
tensors.push_back(tensor);
}
// allocate tensors / buffers and zero
bufs.reserve(ctx_map.size());
for (auto it : ctx_map) {
ggml_backend_buffer_type_t buft = it.first;
ggml_context * ctx = it.second;
ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
if (!buf) {
LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
return false;
}
ggml_backend_buffer_clear(buf, 0);
bufs.emplace_back(buf);
}
return true;
}
bool llama_adapter_cvec::apply(
const llama_model & model,
const float * data,
size_t len,
int32_t n_embd,
int32_t il_start,
int32_t il_end) {
const auto & hparams = model.hparams;
if (data == nullptr) {
// disable the current control vector (but leave allocated for later)
layer_start = -1;
layer_end = -1;
return true;
}
if (n_embd != (int) hparams.n_embd) {
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
return false;
}
if (tensors.empty()) {
if (!init(model)) {
return false;
}
}
layer_start = il_start;
layer_end = il_end;
for (size_t il = 1; il < hparams.n_layer; il++) {
assert(tensors[il] != nullptr);
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
if (off + n_embd <= len) {
ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
}
}
return true;
}
// lora
<|fim_suffix|> }
return nullptr;
}
static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
ggml_context * ctx_init;
gguf_init_params meta_gguf_params = {
/* .no_alloc = */ true,
/* .ctx = */ &ctx_init,
};
gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
if (!ctx_gguf) {
throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
}
ggml_context_ptr ctx { ctx_init };
// check metadata
{
const gguf_context * gguf_ctx = ctx_gguf.get();
LLAMA_LOG_INFO("%s: Dumping metadata keys/values.\n", __func__);
// get metadata as string
for (int i = 0; i < gguf_get_n_kv(gguf_ctx); i++) {
gguf_type type = gguf_get_kv_type(gguf_ctx, i);
const std::string type_name =
type == GGUF_TYPE_ARRAY
? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(gguf_ctx, i)), gguf_get_arr_n(gguf_ctx, i))
: gguf_type_name(type);
const char * name = gguf_get_key(gguf_ctx, i);
const std::string value = gguf_kv_to_str(gguf_ctx, i);
if (type != GGUF_TYPE_ARRAY) {
adapter.gguf_kv.emplace(name, value);
}
const size_t MAX_VALUE_LEN = 40;
std::string print_value = value.size() > MAX_VALUE_LEN ? format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str()) : value;
replace_all(print_value, "\n", "\\n");
LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), print_value.c_str());
}
auto get_kv_str = [&](const std::string & key) -> std::string {
int id = gguf_find_key(gguf_ctx, key.c_str());<|fim_middle|>llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) {
const std::string name(w->name);
const auto pos = ab_map.find(name);
if (pos != ab_map.end()) {
return &pos->second;
|
ggml_context * ctx = ctx_for_buft(buft);
if (!ctx) {
LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
return false;
}
ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
tensors.push_back(tensor);
}
// allocate tensors / buffers and zero
bufs.reserve(ctx_map.size());
for (auto it : ctx_map) {
ggml_backend_buffer_type_t buft = it.first;
ggml_context * ctx = it.second;
ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
if (!buf) {
LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
return false;
}
ggml_backend_buffer_clear(buf, 0);
bufs.emplace_back(buf);
}
return true;
}
bool llama_adapter_cvec::apply(
const llama_model & model,
const float * data,
size_t len,
int32_t n_embd,
int32_t il_start,
int32_t il_end) {
const auto & hparams = model.hparams;
if (data == nullptr) {
// disable the current control vector (but leave allocated for later)
layer_start = -1;
layer_end = -1;
return true;
}
if (n_embd != (int) hparams.n_embd) {
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
return false;
}
if (tensors.empty()) {
if (!init(model)) {
return false;
}
}
layer_start = il_start;
layer_end = il_end;
for (size_t il = 1; il < hparams.n_layer; il++) {
assert(tensors[il] != nullptr);
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
if (off + n_embd <= len) {
ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
}
}
return true;
}
// lora
|
llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) {
const std::string name(w->name);
const auto pos = ab_map.find(name);
if (pos != ab_map.end()) {
return &pos->second;
|
}
return nullptr;
}
static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
ggml_context * ctx_init;
gguf_init_params meta_gguf_params = {
/* .no_alloc = */ true,
/* .ctx = */ &ctx_init,
};
gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
if (!ctx_gguf) {
throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
}
ggml_context_ptr ctx { ctx_init };
// check metadata
{
const gguf_context * gguf_ctx = ctx_gguf.get();
LLAMA_LOG_INFO("%s: Dumping metadata keys/values.\n", __func__);
// get metadata as string
for (int i = 0; i < gguf_get_n_kv(gguf_ctx); i++) {
gguf_type type = gguf_get_kv_type(gguf_ctx, i);
const std::string type_name =
type == GGUF_TYPE_ARRAY
? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(gguf_ctx, i)), gguf_get_arr_n(gguf_ctx, i))
: gguf_type_name(type);
const char * name = gguf_get_key(gguf_ctx, i);
const std::string value = gguf_kv_to_str(gguf_ctx, i);
if (type != GGUF_TYPE_ARRAY) {
adapter.gguf_kv.emplace(name, value);
}
const size_t MAX_VALUE_LEN = 40;
std::string print_value = value.size() > MAX_VALUE_LEN ? format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str()) : value;
replace_all(print_value, "\n", "\\n");
LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), print_value.c_str());
}
auto get_kv_str = [&](const std::string & key) -> std::string {
int id = gguf_find_key(gguf_ctx, key.c_str());
|
random
|
<|fim_prefix|> * Also return the number of recognized blobs in blob_count.
*/
int TessBaseAPI::TextLength(int *blob_count) const {
if (tesseract_ == nullptr || page_res_ == nullptr) {
return 0;
}
PAGE_RES_IT page_res_it(page_res_);
int total_length = 2;
int total_blobs = 0;
// Iterate over the data structures to extract the recognition result.
for (page_res_it.restart_page(); page_res_it.word() != nullptr; page_res_it.forward()) {
WERD_RES *word = page_res_it.word();
WERD_CHOICE *choice = word->best_choice;
if (choice != nullptr) {
total_blobs += choice->length() + 2;
total_length += choice->unichar_string().length() + 2;
for (int i = 0; i < word->reject_map.length(); ++i) {
if (word->reject_map[i].rejected()) {
++total_length;
}
}
}
}
if (blob_count != nullptr) {
*blob_count = total_blobs;
}
return total_length;
}
#ifndef DISABLED_LEGACY_ENGINE
/**
* Estimates the Orientation And Script of the image.
* Returns true if the image was processed successfully.
*/
bool TessBaseAPI::DetectOS(OSResults *osr) {
if (tesseract_ == nullptr) {
return false;
}
ClearResults();
if (tesseract_->pix_binary() == nullptr && !Threshold(&tesseract_->mutable_pix_binary()->pix_)) {
return false;
}
if (input_file_.empty()) {
input_file_ = kInputFile;
}
return orientation_and_script_detection(input_file_.c_str(), osr, tesseract_) > 0;
}
#endif // #ifndef DISABLED_LEGACY_ENGINE
void TessBaseAPI::set_min_orientation_margin(double margin) {
tesseract_->min_orientation_margin.set_value(margin);
}
/**
* Return text orientation of each block as determined in an earlier page layout
* analysis operation. Orientation is returned as the number of ccw 90-degree
* rotations (in [0..3]) required to make the text in the block upright
 * (readable). Note that this may not necessarily be the block orientation
* preferred for recognition (such as the case of vertical CJK text).
*<|fim_suffix|> * consistent with GetTextLines().
*/
void TessBaseAPI::GetBlockTextOrientations(int **block_orientation, bool **vertical_writing) {
delete[] * block_orientation;
*block_orientation = nullptr;
delete[] * vertical_writing;
*vertical_writing = nullptr;
BLOCK_IT block_it(block_list_);
block_it.move_to_first();
int num_blocks = 0;
for (block_it.mark_cycle_pt(); !block_it.cycled_list(); block_it.forward()) {
if (!block_it.data()->pdblk.poly_block()->IsText()) {
continue;
}
++num_blocks;
}
if (!num_blocks) {
tprintf("WARNING: Found no blocks\n");
return;
}
*block_orientation = new int[num_blocks];
*vertical_writing = new bool[num_blocks];
block_it.move_to_first();
int i = 0;
for (block_it.mark_cycle_pt(); !block_it.cycled_list(); block_it.forward()) {
if (!block_it.data()->pdblk.poly_block()->IsText()) {
continue;
}
FCOORD re_rotation = block_it.data()->re_rotation();
float re_theta = re_rotation.angle();
FCOORD classify_rotation = block_it.data()->classify_rotation();
float classify_theta = classify_rotation.angle();
double rot_theta = -(re_theta - classify_theta) * 2.0 / M_PI;
if (rot_theta < 0) {
rot_theta += 4;
}
int num_rotations = static_cast<int>(rot_theta + 0.5);
(*block_orientation)[i] = num_rotations;
// The classify_rotation is non-zero only if the text has vertical
// writing direction.
(*vertical_writing)[i] = classify_rotation.y() != 0.0f;
++i;
}
}
void TessBaseAPI::DetectParagraphs(bool after_text_recognition) {
int debug_level = 0;
GetIntVariable("paragraph_debug_level", &debug_level);
if (paragraph_models_ == nullptr) {
paragraph_models_ = new std::vector<ParagraphModel *>;
}
MutableIterator *result_it = GetMutableIterator();
do { // Detect paragraphs for this block
std::vector<ParagraphModel *> models;
::tesseract::DetectParagraphs(debug_level, after_text_recognition, result_it, &models);<|fim_middle|> * Also returns whether the text in the block is believed to have vertical
* writing direction (when in an upright page orientation).
*
* The returned array is of length equal to the number of text blocks, which may
* be less than the total number of blocks. The ordering is intended to be
|
* Also return the number of recognized blobs in blob_count.
*/
int TessBaseAPI::TextLength(int *blob_count) const {
if (tesseract_ == nullptr || page_res_ == nullptr) {
return 0;
}
PAGE_RES_IT page_res_it(page_res_);
int total_length = 2;
int total_blobs = 0;
// Iterate over the data structures to extract the recognition result.
for (page_res_it.restart_page(); page_res_it.word() != nullptr; page_res_it.forward()) {
WERD_RES *word = page_res_it.word();
WERD_CHOICE *choice = word->best_choice;
if (choice != nullptr) {
total_blobs += choice->length() + 2;
total_length += choice->unichar_string().length() + 2;
for (int i = 0; i < word->reject_map.length(); ++i) {
if (word->reject_map[i].rejected()) {
++total_length;
}
}
}
}
if (blob_count != nullptr) {
*blob_count = total_blobs;
}
return total_length;
}
#ifndef DISABLED_LEGACY_ENGINE
/**
* Estimates the Orientation And Script of the image.
* Returns true if the image was processed successfully.
*/
bool TessBaseAPI::DetectOS(OSResults *osr) {
if (tesseract_ == nullptr) {
return false;
}
ClearResults();
if (tesseract_->pix_binary() == nullptr && !Threshold(&tesseract_->mutable_pix_binary()->pix_)) {
return false;
}
if (input_file_.empty()) {
input_file_ = kInputFile;
}
return orientation_and_script_detection(input_file_.c_str(), osr, tesseract_) > 0;
}
#endif // #ifndef DISABLED_LEGACY_ENGINE
void TessBaseAPI::set_min_orientation_margin(double margin) {
tesseract_->min_orientation_margin.set_value(margin);
}
/**
* Return text orientation of each block as determined in an earlier page layout
* analysis operation. Orientation is returned as the number of ccw 90-degree
* rotations (in [0..3]) required to make the text in the block upright
 * (readable). Note that this may not necessarily be the block orientation
* preferred for recognition (such as the case of vertical CJK text).
*
|
* Also returns whether the text in the block is believed to have vertical
* writing direction (when in an upright page orientation).
*
* The returned array is of length equal to the number of text blocks, which may
* be less than the total number of blocks. The ordering is intended to be
|
* consistent with GetTextLines().
*/
void TessBaseAPI::GetBlockTextOrientations(int **block_orientation, bool **vertical_writing) {
delete[] * block_orientation;
*block_orientation = nullptr;
delete[] * vertical_writing;
*vertical_writing = nullptr;
BLOCK_IT block_it(block_list_);
block_it.move_to_first();
int num_blocks = 0;
for (block_it.mark_cycle_pt(); !block_it.cycled_list(); block_it.forward()) {
if (!block_it.data()->pdblk.poly_block()->IsText()) {
continue;
}
++num_blocks;
}
if (!num_blocks) {
tprintf("WARNING: Found no blocks\n");
return;
}
*block_orientation = new int[num_blocks];
*vertical_writing = new bool[num_blocks];
block_it.move_to_first();
int i = 0;
for (block_it.mark_cycle_pt(); !block_it.cycled_list(); block_it.forward()) {
if (!block_it.data()->pdblk.poly_block()->IsText()) {
continue;
}
FCOORD re_rotation = block_it.data()->re_rotation();
float re_theta = re_rotation.angle();
FCOORD classify_rotation = block_it.data()->classify_rotation();
float classify_theta = classify_rotation.angle();
double rot_theta = -(re_theta - classify_theta) * 2.0 / M_PI;
if (rot_theta < 0) {
rot_theta += 4;
}
int num_rotations = static_cast<int>(rot_theta + 0.5);
(*block_orientation)[i] = num_rotations;
// The classify_rotation is non-zero only if the text has vertical
// writing direction.
(*vertical_writing)[i] = classify_rotation.y() != 0.0f;
++i;
}
}
void TessBaseAPI::DetectParagraphs(bool after_text_recognition) {
int debug_level = 0;
GetIntVariable("paragraph_debug_level", &debug_level);
if (paragraph_models_ == nullptr) {
paragraph_models_ = new std::vector<ParagraphModel *>;
}
MutableIterator *result_it = GetMutableIterator();
do { // Detect paragraphs for this block
std::vector<ParagraphModel *> models;
::tesseract::DetectParagraphs(debug_level, after_text_recognition, result_it, &models);
|
random
|
<|fim_prefix|>// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_ADDRDB_H
#define BITCOIN_ADDRDB_H
#include <net_types.h>
#include <util/fs.h>
#include <util/result.h>
#include <memory>
#include <vector>
class ArgsManager;
class AddrMan;
class CAddress;
class DataStream;
class NetGroupManager;
<|fim_suffix|>void ReadFromStream(AddrMan& addr, DataStream& ssPeers);
bool DumpPeerAddresses(const ArgsManager& args, const AddrMan& addr);
/** Access to the banlist database (banlist.json) */
class CBanDB
{
private:
/**
* JSON key under which the data is stored in the json database.
*/
static constexpr const char* JSON_KEY = "banned_nets";
const fs::path m_banlist_dat;
const fs::path m_banlist_json;
public:
explicit CBanDB(fs::path ban_list_path);
bool Write(const banmap_t& banSet);
/**
* Read the banlist from disk.
* @param[out] banSet The loaded list. Set if `true` is returned, otherwise it is left
* in an undefined state.
* @return true on success
*/
bool Read(banmap_t& banSet);
};
/** Returns an error string on failure */
util::Result<std::unique_ptr<AddrMan>> LoadAddrman(const NetGroupManager& netgroupman, const ArgsManager& args);
/**
* Dump the anchor IP address database (anchors.dat)
*
* Anchors are last known outgoing block-relay-only peers that are
* tried to re-connect to on startup.
*/
void DumpAnchors(const fs::path& anchors_db_path, const std::vector<CAddress>& anchors);
/**
* Read the anchor IP address database (anchors.dat)
*
* Deleting anchors.dat is intentional as it avoids renewed peering to anchors after
* an unclean shutdown and thus potential exploitation of the anchor peer policy.
*/
std::vector<CAddress> ReadAnchors(const fs::path& anchors_db_path);
#endif // BITCOIN_ADDRDB_H
<|fim_middle|>/** Only used by tests. */
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_ADDRDB_H
#define BITCOIN_ADDRDB_H
#include <net_types.h>
#include <util/fs.h>
#include <util/result.h>
#include <memory>
#include <vector>
class ArgsManager;
class AddrMan;
class CAddress;
class DataStream;
class NetGroupManager;
|
/** Only used by tests. */
|
void ReadFromStream(AddrMan& addr, DataStream& ssPeers);
bool DumpPeerAddresses(const ArgsManager& args, const AddrMan& addr);
/** Access to the banlist database (banlist.json) */
class CBanDB
{
private:
/**
* JSON key under which the data is stored in the json database.
*/
static constexpr const char* JSON_KEY = "banned_nets";
const fs::path m_banlist_dat;
const fs::path m_banlist_json;
public:
explicit CBanDB(fs::path ban_list_path);
bool Write(const banmap_t& banSet);
/**
* Read the banlist from disk.
* @param[out] banSet The loaded list. Set if `true` is returned, otherwise it is left
* in an undefined state.
* @return true on success
*/
bool Read(banmap_t& banSet);
};
/** Returns an error string on failure */
util::Result<std::unique_ptr<AddrMan>> LoadAddrman(const NetGroupManager& netgroupman, const ArgsManager& args);
/**
* Dump the anchor IP address database (anchors.dat)
*
* Anchors are last known outgoing block-relay-only peers that are
* tried to re-connect to on startup.
*/
void DumpAnchors(const fs::path& anchors_db_path, const std::vector<CAddress>& anchors);
/**
* Read the anchor IP address database (anchors.dat)
*
* Deleting anchors.dat is intentional as it avoids renewed peering to anchors after
* an unclean shutdown and thus potential exploitation of the anchor peer policy.
*/
std::vector<CAddress> ReadAnchors(const fs::path& anchors_db_path);
#endif // BITCOIN_ADDRDB_H
|
random
|
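The CBanDB declaration above exposes only a path-taking constructor plus Write and Read. A minimal round-trip sketch against that interface, assuming the declarations from the header above (CBanDB, banmap_t); the file name and error handling are assumptions, not Bitcoin Core behavior:

    #include <util/fs.h>

    void banlist_roundtrip(const banmap_t & to_store)
    {
        CBanDB db(fs::path{"banlist.json"});
        if (!db.Write(to_store)) {
            return; // persisting failed
        }
        banmap_t loaded;
        if (db.Read(loaded)) {
            // loaded now holds the persisted set; on failure it is left in an
            // undefined state, as documented on CBanDB::Read above.
        }
    }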
<|fim_prefix|>#endif
/** Minimum sensible image size to be worth running Tesseract. */
const int kMinRectSize = 10;
/** Character returned when Tesseract couldn't recognize as anything. */
const char kTesseractReject = '~';
/** Character used by UNLV error counter as a reject. */
const char kUNLVReject = '~';
/** Character used by UNLV as a suspect marker. */
const char kUNLVSuspect = '^';
/**
* Temp file used for storing current parameters before applying retry values.
*/
static const char *kOldVarsFile = "failed_vars.txt";
#ifndef DISABLED_LEGACY_ENGINE
/**
* Filename used for input image file, from which to derive a name to search
* for a possible UNLV zone file, if none is specified by SetInputName.
*/
static const char *kInputFile = "noname.tif";
static const char kUnknownFontName[] = "UnknownFont";
static STRING_VAR(classify_font_name, kUnknownFontName,
"Default font name to be used in training");
// Finds the name of the training font and returns it in fontname, by cutting
// it out based on the expectation that the filename is of the form:
// /path/to/dir/[lang].[fontname].exp[num]
// The [lang], [fontname] and [num] fields should not have '.' characters.
// If the global parameter classify_font_name is set, its value is used instead.
static void ExtractFontName(const char* filename, std::string* fontname) {
*fontname = classify_font_name;
if (*fontname == kUnknownFontName) {
// filename is expected to be of the form [lang].[fontname].exp[num]
// The [lang], [fontname] and [num] fields should not have '.' characters.
const char *basename = strrchr(filename, '/');
const char *firstdot = strchr(basename ? basename : filename, '.');
const char *lastdot = strrchr(filename, '.');
if (firstdot != lastdot && firstdot != nullptr && lastdot != nullptr) {
++firstdot;
*fontname = firstdot;
fontname->resize(lastdot - firstdot);
}
}
}
#endif<|fim_suffix|> std::vector<std::string> *langs) {
for (const auto& entry :
std::filesystem::recursive_directory_iterator(datadir,
std::filesystem::directory_options::follow_directory_symlink |
std::filesystem::directory_options::skip_permission_denied)) {
auto path = entry.path().lexically_relative(datadir);
if (path.extension() == ".traineddata") {
langs->push_back(path.replace_extension("").string());
}
}
}
TessBaseAPI::TessBaseAPI()
: tesseract_(nullptr)
, osd_tesseract_(nullptr)
, equ_detect_(nullptr)
, reader_(nullptr)
,
      // thresholder_ is initialized to nullptr here, but will be set before use
      // by a constructor of a derived API, or created
      // implicitly when used in InternalSetImage.
thresholder_(nullptr)
, paragraph_models_(nullptr)
, block_list_(nullptr)
, page_res_(nullptr)
, last_oem_requested_(OEM_DEFAULT)
, recognition_done_(false)
, rect_left_(0)
, rect_top_(0)
, rect_width_(0)
, rect_height_(0)
, image_width_(0)
, image_height_(0) {
}
TessBaseAPI::~TessBaseAPI() {
End();
}
/**
* Returns the version identifier as a static string. Do not delete.
*/
const char *TessBaseAPI::Version() {
return TESSERACT_VERSION_STR;
}
/**
* Set the name of the input file. Needed only for training and
* loading a UNLV zone file.
*/
void TessBaseAPI::SetInputName(const char *name) {
input_file_ = name ? name : "";
}
/** Set the name of the output files. Needed only for debugging. */
void TessBaseAPI::SetOutputName(const char *name) {
output_file_ = name ? name : "";
}
bool TessBaseAPI::SetVariable(const char *name, const char *value) {
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
}
return ParamUtils::SetParam(name, value, SET_PARAM_CONSTRAINT_NON_INIT_ONLY,
tesseract_->params());
}
bool TessBaseAPI::SetDebugVariable(const char *name, const char *value) {<|fim_middle|>
/* Add all available languages recursively.
*/
static void addAvailableLanguages(const std::string &datadir,
|
#endif
/** Minimum sensible image size to be worth running Tesseract. */
const int kMinRectSize = 10;
/** Character returned when Tesseract couldn't recognize as anything. */
const char kTesseractReject = '~';
/** Character used by UNLV error counter as a reject. */
const char kUNLVReject = '~';
/** Character used by UNLV as a suspect marker. */
const char kUNLVSuspect = '^';
/**
* Temp file used for storing current parameters before applying retry values.
*/
static const char *kOldVarsFile = "failed_vars.txt";
#ifndef DISABLED_LEGACY_ENGINE
/**
* Filename used for input image file, from which to derive a name to search
* for a possible UNLV zone file, if none is specified by SetInputName.
*/
static const char *kInputFile = "noname.tif";
static const char kUnknownFontName[] = "UnknownFont";
static STRING_VAR(classify_font_name, kUnknownFontName,
"Default font name to be used in training");
// Finds the name of the training font and returns it in fontname, by cutting
// it out based on the expectation that the filename is of the form:
// /path/to/dir/[lang].[fontname].exp[num]
// The [lang], [fontname] and [num] fields should not have '.' characters.
// If the global parameter classify_font_name is set, its value is used instead.
static void ExtractFontName(const char* filename, std::string* fontname) {
*fontname = classify_font_name;
if (*fontname == kUnknownFontName) {
// filename is expected to be of the form [lang].[fontname].exp[num]
// The [lang], [fontname] and [num] fields should not have '.' characters.
const char *basename = strrchr(filename, '/');
const char *firstdot = strchr(basename ? basename : filename, '.');
const char *lastdot = strrchr(filename, '.');
if (firstdot != lastdot && firstdot != nullptr && lastdot != nullptr) {
++firstdot;
*fontname = firstdot;
fontname->resize(lastdot - firstdot);
}
}
}
#endif
|
/* Add all available languages recursively.
*/
static void addAvailableLanguages(const std::string &datadir,
|
std::vector<std::string> *langs) {
for (const auto& entry :
std::filesystem::recursive_directory_iterator(datadir,
std::filesystem::directory_options::follow_directory_symlink |
std::filesystem::directory_options::skip_permission_denied)) {
auto path = entry.path().lexically_relative(datadir);
if (path.extension() == ".traineddata") {
langs->push_back(path.replace_extension("").string());
}
}
}
TessBaseAPI::TessBaseAPI()
: tesseract_(nullptr)
, osd_tesseract_(nullptr)
, equ_detect_(nullptr)
, reader_(nullptr)
,
      // thresholder_ is initialized to nullptr here, but will be set before use
      // by a constructor of a derived API, or created
      // implicitly when used in InternalSetImage.
thresholder_(nullptr)
, paragraph_models_(nullptr)
, block_list_(nullptr)
, page_res_(nullptr)
, last_oem_requested_(OEM_DEFAULT)
, recognition_done_(false)
, rect_left_(0)
, rect_top_(0)
, rect_width_(0)
, rect_height_(0)
, image_width_(0)
, image_height_(0) {
}
TessBaseAPI::~TessBaseAPI() {
End();
}
/**
* Returns the version identifier as a static string. Do not delete.
*/
const char *TessBaseAPI::Version() {
return TESSERACT_VERSION_STR;
}
/**
* Set the name of the input file. Needed only for training and
* loading a UNLV zone file.
*/
void TessBaseAPI::SetInputName(const char *name) {
input_file_ = name ? name : "";
}
/** Set the name of the output files. Needed only for debugging. */
void TessBaseAPI::SetOutputName(const char *name) {
output_file_ = name ? name : "";
}
bool TessBaseAPI::SetVariable(const char *name, const char *value) {
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
}
return ParamUtils::SetParam(name, value, SET_PARAM_CONSTRAINT_NON_INIT_ONLY,
tesseract_->params());
}
bool TessBaseAPI::SetDebugVariable(const char *name, const char *value) {
|
random
|
<|fim_prefix|>// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "calibController.hpp"
#include <algorithm>
#include <cmath>
#include <ctime>
#include <opencv2/3d.hpp>
#include <opencv2/calib.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace cv;
double calib::calibController::estimateCoverageQuality()
{
int gridSize = 10;
int xGridStep = mCalibData->imageSize.width / gridSize;
int yGridStep = mCalibData->imageSize.height / gridSize;
std::vector<int> pointsInCell(gridSize*gridSize);
std::fill(pointsInCell.begin(), pointsInCell.end(), 0);
for(std::vector<std::vector<cv::Point2f> >::iterator it = mCalibData->imagePoints.begin(); it != mCalibData->imagePoints.end(); ++it)
for(std::vector<cv::Point2f>::iterator pointIt = (*it).begin(); pointIt != (*it).end(); ++pointIt) {
int i = (int)((*pointIt).x / xGridStep);
int j = (int)((*pointIt).y / yGridStep);
pointsInCell[i*gridSize + j]++;
}
for(std::vector<cv::Mat>::iterator it = mCalibData->allCharucoCorners.begin(); it != mCalibData->allCharucoCorners.end(); ++it)
for(int l = 0; l < (*it).size[0]; l++) {
int i = (int)((*it).at<float>(l, 0) / xGridStep);
int j = (int)((*it).at<float>(l, 1) / yGridStep);
pointsInCell[i*gridSize + j]++;
}
cv::Mat mean, stdDev;
cv::meanStdDev(pointsInCell, mean, stdDev);
return mean.at<double>(0) / (stdDev.at<double>(0) + 1e-7);
}
calib::calibController::calibController()
{
mCalibFlags = 0;
}<|fim_suffix|> mNeedTuning = autoTuning;
mMinFramesNum = minFramesNum;
mConfIntervalsState = false;
mCoverageQualityState = false;
}
void calib::calibController::updateState()
{
if(mCalibData->cameraMatrix.total()) {
const double relErrEps = 0.05;
bool fConfState = false, cConfState = false, dConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(0) / mCalibData->cameraMatrix.at<double>(0,0) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(1) / mCalibData->cameraMatrix.at<double>(1,1) < relErrEps)
fConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(2) / mCalibData->cameraMatrix.at<double>(0,2) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(3) / mCalibData->cameraMatrix.at<double>(1,2) < relErrEps)
cConfState = true;
for(int i = 0; i < 5; i++)
if(mCalibData->stdDeviations.at<double>(4+i) / fabs(mCalibData->distCoeffs.at<double>(i)) > 1)
dConfState = false;
mConfIntervalsState = fConfState && cConfState && dConfState;
}
if(getFramesNumberState())
        mCoverageQualityState = estimateCoverageQuality() > 1.8;
if (getFramesNumberState() && mNeedTuning) {
if( !(mCalibFlags & cv::CALIB_FIX_ASPECT_RATIO) &&
mCalibData->cameraMatrix.total()) {
double fDiff = fabs(mCalibData->cameraMatrix.at<double>(0,0) -
mCalibData->cameraMatrix.at<double>(1,1));
if (fDiff < 3*mCalibData->stdDeviations.at<double>(0) &&
fDiff < 3*mCalibData->stdDeviations.at<double>(1)) {
mCalibFlags |= cv::CALIB_FIX_ASPECT_RATIO;
mCalibData->cameraMatrix.at<double>(0,0) =
mCalibData->cameraMatrix.at<double>(1,1);
}
}
if(!(mCalibFlags & cv::CALIB_ZERO_TANGENT_DIST)) {<|fim_middle|>
calib::calibController::calibController(cv::Ptr<calib::calibrationData> data, int initialFlags, bool autoTuning, int minFramesNum) :
mCalibData(data)
{
mCalibFlags = initialFlags;
|
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "calibController.hpp"
#include <algorithm>
#include <cmath>
#include <ctime>
#include <opencv2/3d.hpp>
#include <opencv2/calib.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace cv;
double calib::calibController::estimateCoverageQuality()
{
int gridSize = 10;
int xGridStep = mCalibData->imageSize.width / gridSize;
int yGridStep = mCalibData->imageSize.height / gridSize;
std::vector<int> pointsInCell(gridSize*gridSize);
std::fill(pointsInCell.begin(), pointsInCell.end(), 0);
for(std::vector<std::vector<cv::Point2f> >::iterator it = mCalibData->imagePoints.begin(); it != mCalibData->imagePoints.end(); ++it)
for(std::vector<cv::Point2f>::iterator pointIt = (*it).begin(); pointIt != (*it).end(); ++pointIt) {
int i = (int)((*pointIt).x / xGridStep);
int j = (int)((*pointIt).y / yGridStep);
pointsInCell[i*gridSize + j]++;
}
for(std::vector<cv::Mat>::iterator it = mCalibData->allCharucoCorners.begin(); it != mCalibData->allCharucoCorners.end(); ++it)
for(int l = 0; l < (*it).size[0]; l++) {
int i = (int)((*it).at<float>(l, 0) / xGridStep);
int j = (int)((*it).at<float>(l, 1) / yGridStep);
pointsInCell[i*gridSize + j]++;
}
cv::Mat mean, stdDev;
cv::meanStdDev(pointsInCell, mean, stdDev);
return mean.at<double>(0) / (stdDev.at<double>(0) + 1e-7);
}
calib::calibController::calibController()
{
mCalibFlags = 0;
}
|
calib::calibController::calibController(cv::Ptr<calib::calibrationData> data, int initialFlags, bool autoTuning, int minFramesNum) :
mCalibData(data)
{
mCalibFlags = initialFlags;
|
mNeedTuning = autoTuning;
mMinFramesNum = minFramesNum;
mConfIntervalsState = false;
mCoverageQualityState = false;
}
void calib::calibController::updateState()
{
if(mCalibData->cameraMatrix.total()) {
const double relErrEps = 0.05;
bool fConfState = false, cConfState = false, dConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(0) / mCalibData->cameraMatrix.at<double>(0,0) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(1) / mCalibData->cameraMatrix.at<double>(1,1) < relErrEps)
fConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(2) / mCalibData->cameraMatrix.at<double>(0,2) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(3) / mCalibData->cameraMatrix.at<double>(1,2) < relErrEps)
cConfState = true;
for(int i = 0; i < 5; i++)
if(mCalibData->stdDeviations.at<double>(4+i) / fabs(mCalibData->distCoeffs.at<double>(i)) > 1)
dConfState = false;
mConfIntervalsState = fConfState && cConfState && dConfState;
}
if(getFramesNumberState())
        mCoverageQualityState = estimateCoverageQuality() > 1.8;
if (getFramesNumberState() && mNeedTuning) {
if( !(mCalibFlags & cv::CALIB_FIX_ASPECT_RATIO) &&
mCalibData->cameraMatrix.total()) {
double fDiff = fabs(mCalibData->cameraMatrix.at<double>(0,0) -
mCalibData->cameraMatrix.at<double>(1,1));
if (fDiff < 3*mCalibData->stdDeviations.at<double>(0) &&
fDiff < 3*mCalibData->stdDeviations.at<double>(1)) {
mCalibFlags |= cv::CALIB_FIX_ASPECT_RATIO;
mCalibData->cameraMatrix.at<double>(0,0) =
mCalibData->cameraMatrix.at<double>(1,1);
}
}
if(!(mCalibFlags & cv::CALIB_ZERO_TANGENT_DIST)) {
|
random
|
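The coverage score computed above is the mean of per-cell detection counts divided by their standard deviation, so evenly spread detections score high and clustered ones score low; updateState() treats anything above 1.8 as adequate. Below is a self-contained sketch of the same statistic, with an invented image size and two invented point sets.

// Self-contained sketch of the coverage statistic: mean / (stddev + eps) of
// per-cell detection counts over a 10x10 grid. The point sets are invented.
#include <cmath>
#include <cstdio>
#include <utility>
#include <vector>

static double coverageQuality(const std::vector<std::pair<float, float>> &pts,
                              int width, int height) {
    const int gridSize = 10;
    const int xStep = width / gridSize, yStep = height / gridSize;
    std::vector<int> cells(gridSize * gridSize, 0);
    for (const auto &p : pts) {
        int i = static_cast<int>(p.first / xStep);
        int j = static_cast<int>(p.second / yStep);
        cells[i * gridSize + j]++;
    }
    double mean = 0.0;
    for (int c : cells) mean += c;
    mean /= cells.size();
    double var = 0.0;
    for (int c : cells) var += (c - mean) * (c - mean);
    return mean / (std::sqrt(var / cells.size()) + 1e-7);
}

int main() {
    std::vector<std::pair<float, float>> spread, clustered;
    for (int x = 0; x < 10; ++x)
        for (int y = 0; y < 10; ++y) {
            spread.emplace_back(x * 64.0f + 32.0f, y * 48.0f + 24.0f); // one per cell
            clustered.emplace_back(x * 3.0f, y * 3.0f);                // all top-left
        }
    std::printf("spread:    %.2f\n", coverageQuality(spread, 640, 480));    // very high
    std::printf("clustered: %.2f\n", coverageQuality(clustered, 640, 480)); // ~0.10
    return 0;
}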
<|fim_prefix|> case PT_NOISE:
tprintf("TODO: Please report image which triggers the noise case.\n");
ASSERT_HOST(false);
default:
break;
}
const std::unique_ptr<const char[]> para_text(it->GetUTF8Text(RIL_PARA));
text += para_text.get();
} while (it->Next(RIL_PARA));
return copy_string(text);
}
static void AddBoxToTSV(const PageIterator *it, PageIteratorLevel level, std::string &text) {
int left, top, right, bottom;
it->BoundingBox(level, &left, &top, &right, &bottom);
text += "\t" + std::to_string(left);
text += "\t" + std::to_string(top);
text += "\t" + std::to_string(right - left);
text += "\t" + std::to_string(bottom - top);
}
/**
* Make a TSV-formatted string from the internal data structures.
* page_number is 0-based but will appear in the output as 1-based.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetTSVText(int page_number) {
if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(nullptr) < 0)) {
return nullptr;
}
#if !defined(NDEBUG)
int lcnt = 1, bcnt = 1, pcnt = 1, wcnt = 1;
#endif
int page_id = page_number + 1; // we use 1-based page numbers.
int page_num = page_id;
int block_num = 0;
int par_num = 0;
int line_num = 0;
int word_num = 0;
std::string tsv_str;
tsv_str += "1\t" + std::to_string(page_num); // level 1 - page
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(rect_left_);
tsv_str += "\t" + std::to_string(rect_top_);
tsv_str += "\t" + std::to_string(rect_width_);
tsv_str += "\t" + std::to_string(rect_height_);
tsv_str += "\t-1\t\n";
const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;<|fim_suffix|> line_num = 0;
word_num = 0;
tsv_str += "2\t" + std::to_string(page_num); // level 2 - block
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for block
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
par_num++;
line_num = 0;
word_num = 0;
tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for para
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
line_num++;
word_num = 0;
tsv_str += "4\t" + std::to_string(page_num); // level 4 - line
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_TEXTLINE, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for line
}
// Now, process the word...
int left, top, right, bottom;
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
word_num++;
tsv_str += "5\t" + std::to_string(page_num); // level 5 - word
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(left);
tsv_str += "\t" + std::to_string(top);
tsv_str += "\t" + std::to_string(right - left);
tsv_str += "\t" + std::to_string(bottom - top);<|fim_middle|> }
// Add rows for any new block/paragraph/textline.
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
block_num++;
par_num = 0;
|
case PT_NOISE:
tprintf("TODO: Please report image which triggers the noise case.\n");
ASSERT_HOST(false);
default:
break;
}
const std::unique_ptr<const char[]> para_text(it->GetUTF8Text(RIL_PARA));
text += para_text.get();
} while (it->Next(RIL_PARA));
return copy_string(text);
}
static void AddBoxToTSV(const PageIterator *it, PageIteratorLevel level, std::string &text) {
int left, top, right, bottom;
it->BoundingBox(level, &left, &top, &right, &bottom);
text += "\t" + std::to_string(left);
text += "\t" + std::to_string(top);
text += "\t" + std::to_string(right - left);
text += "\t" + std::to_string(bottom - top);
}
/**
* Make a TSV-formatted string from the internal data structures.
* page_number is 0-based but will appear in the output as 1-based.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetTSVText(int page_number) {
if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(nullptr) < 0)) {
return nullptr;
}
#if !defined(NDEBUG)
int lcnt = 1, bcnt = 1, pcnt = 1, wcnt = 1;
#endif
int page_id = page_number + 1; // we use 1-based page numbers.
int page_num = page_id;
int block_num = 0;
int par_num = 0;
int line_num = 0;
int word_num = 0;
std::string tsv_str;
tsv_str += "1\t" + std::to_string(page_num); // level 1 - page
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(rect_left_);
tsv_str += "\t" + std::to_string(rect_top_);
tsv_str += "\t" + std::to_string(rect_width_);
tsv_str += "\t" + std::to_string(rect_height_);
tsv_str += "\t-1\t\n";
const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
|
}
// Add rows for any new block/paragraph/textline.
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
block_num++;
par_num = 0;
|
line_num = 0;
word_num = 0;
tsv_str += "2\t" + std::to_string(page_num); // level 2 - block
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for block
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
par_num++;
line_num = 0;
word_num = 0;
tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for para
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
line_num++;
word_num = 0;
tsv_str += "4\t" + std::to_string(page_num); // level 4 - line
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_TEXTLINE, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for line
}
// Now, process the word...
int left, top, right, bottom;
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
word_num++;
tsv_str += "5\t" + std::to_string(page_num); // level 5 - word
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(left);
tsv_str += "\t" + std::to_string(top);
tsv_str += "\t" + std::to_string(right - left);
tsv_str += "\t" + std::to_string(bottom - top);
|
random
|
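A hedged sketch of how GetTSVText() is typically consumed, following the ownership rule stated in its comment; the api object is assumed to have been initialized and given an image elsewhere.

// Assumed usage of GetTSVText(): only the call and the delete[] ownership
// rule from the comment above are demonstrated.
#include <tesseract/baseapi.h>
#include <cstdio>

void DumpFirstPageTSV(tesseract::TessBaseAPI &api) {
    char *tsv = api.GetTSVText(0);  // page_number is 0-based, printed as 1-based
    if (tsv != nullptr) {
        std::fputs(tsv, stdout);
        delete[] tsv;               // caller owns the returned buffer
    }
}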
<|fim_prefix|> return;
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
ImGuiMouseCursor imgui_cursor = ImGui::GetMouseCursor();
// Hide OS mouse cursor if imgui is drawing it
if (io.MouseDrawCursor)
imgui_cursor = ImGuiMouseCursor_None;
if (bd->LastCursor == imgui_cursor)
return;
bd->LastCursor = imgui_cursor;
if (imgui_cursor == ImGuiMouseCursor_None)
{
al_set_mouse_cursor(bd->Display, bd->MouseCursorInvisible);
}
else
{
ALLEGRO_SYSTEM_MOUSE_CURSOR cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_DEFAULT;
switch (imgui_cursor)
{
case ImGuiMouseCursor_TextInput: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_EDIT; break;
case ImGuiMouseCursor_ResizeAll: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_MOVE; break;
case ImGuiMouseCursor_ResizeNS: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_RESIZE_N; break;
case ImGuiMouseCursor_ResizeEW: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_RESIZE_E; break;
case ImGuiMouseCursor_ResizeNESW: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_RESIZE_NE; break;
case ImGuiMouseCursor_ResizeNWSE: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_RESIZE_NW; break;
case ImGuiMouseCursor_Wait: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_BUSY; break;
case ImGuiMouseCursor_Progress: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_PROGRESS; break;
case ImGuiMouseCursor_NotAllowed: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_UNAVAILABLE; break;
}
al_set_system_mouse_cursor(bd->Display, cursor_id);
}
}
void ImGui_ImplAllegro5_NewFrame()
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
IM_ASSERT(bd != nullptr && "Context or backend not initialized! Did you call ImGui_ImplAllegro5_Init()?");
if (!bd->MouseCursorInvisible)
ImGui_ImplAllegro5_CreateDeviceObjects();
    // Setup display size (every frame to accommodate window resizing)
// Allegro 5 doesn't receive PrintScreen under Windows
#ifdef _WIN32
io.AddKeyEvent(ImGuiKey_PrintScreen, (::GetAsyncKeyState(VK_SNAPSHOT) & 0x8000) != 0);
#endif
// Setup mouse cursor shape
ImGui_ImplAllegro5_UpdateMouseCursor();
}
//-----------------------------------------------------------------------------
#endif // #ifndef IMGUI_DISABLE
<|fim_middle|> ImGuiIO& io = ImGui::GetIO();
int w, h;
w = al_get_display_width(bd->Display);
h = al_get_display_height(bd->Display);
io.DisplaySize = ImVec2((float)w, (float)h);
// Setup time step
double current_time = al_get_time();
io.DeltaTime = bd->Time > 0.0 ? (float)(current_time - bd->Time) : (float)(1.0f / 60.0f);
|
return;
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
ImGuiMouseCursor imgui_cursor = ImGui::GetMouseCursor();
// Hide OS mouse cursor if imgui is drawing it
if (io.MouseDrawCursor)
imgui_cursor = ImGuiMouseCursor_None;
if (bd->LastCursor == imgui_cursor)
return;
bd->LastCursor = imgui_cursor;
if (imgui_cursor == ImGuiMouseCursor_None)
{
al_set_mouse_cursor(bd->Display, bd->MouseCursorInvisible);
}
else
{
ALLEGRO_SYSTEM_MOUSE_CURSOR cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_DEFAULT;
switch (imgui_cursor)
{
case ImGuiMouseCursor_TextInput: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_EDIT; break;
case ImGuiMouseCursor_ResizeAll: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_MOVE; break;
case ImGuiMouseCursor_ResizeNS: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_RESIZE_N; break;
case ImGuiMouseCursor_ResizeEW: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_RESIZE_E; break;
case ImGuiMouseCursor_ResizeNESW: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_RESIZE_NE; break;
case ImGuiMouseCursor_ResizeNWSE: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_RESIZE_NW; break;
case ImGuiMouseCursor_Wait: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_BUSY; break;
case ImGuiMouseCursor_Progress: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_PROGRESS; break;
case ImGuiMouseCursor_NotAllowed: cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_UNAVAILABLE; break;
}
al_set_system_mouse_cursor(bd->Display, cursor_id);
}
}
void ImGui_ImplAllegro5_NewFrame()
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
IM_ASSERT(bd != nullptr && "Context or backend not initialized! Did you call ImGui_ImplAllegro5_Init()?");
if (!bd->MouseCursorInvisible)
ImGui_ImplAllegro5_CreateDeviceObjects();
    // Setup display size (every frame to accommodate window resizing)
|
ImGuiIO& io = ImGui::GetIO();
int w, h;
w = al_get_display_width(bd->Display);
h = al_get_display_height(bd->Display);
io.DisplaySize = ImVec2((float)w, (float)h);
// Setup time step
double current_time = al_get_time();
io.DeltaTime = bd->Time > 0.0 ? (float)(current_time - bd->Time) : (float)(1.0f / 60.0f);
|
bd->Time = current_time;
// Allegro 5 doesn't receive PrintScreen under Windows
#ifdef _WIN32
io.AddKeyEvent(ImGuiKey_PrintScreen, (::GetAsyncKeyState(VK_SNAPSHOT) & 0x8000) != 0);
#endif
// Setup mouse cursor shape
ImGui_ImplAllegro5_UpdateMouseCursor();
}
//-----------------------------------------------------------------------------
#endif // #ifndef IMGUI_DISABLE
|
random
|
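For context, a hedged sketch of where ImGui_ImplAllegro5_NewFrame() sits in a typical frame; the widget contents and clear color are illustrative, and the render call is the usual imgui_impl_allegro5 entry point rather than anything defined in this excerpt.

// Hedged per-frame sketch around ImGui_ImplAllegro5_NewFrame().
#include <allegro5/allegro.h>
#include "imgui.h"
#include "imgui_impl_allegro5.h"

void RunOneFrame() {
    ImGui_ImplAllegro5_NewFrame();  // refreshes DisplaySize, DeltaTime and the cursor shape
    ImGui::NewFrame();

    ImGui::Begin("Demo");
    ImGui::Text("Hello from the Allegro 5 backend");
    ImGui::End();

    ImGui::Render();
    al_clear_to_color(al_map_rgb(32, 32, 32));
    ImGui_ImplAllegro5_RenderDrawData(ImGui::GetDrawData());
    al_flip_display();
}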
<|fim_prefix|> }
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
}
scaling_selection = false;
scaling_selection_scale = Vector2(1, 1);
scaling_selection_offset = Vector2();
queue_redraw();
}
Ref<InputEventMouseMotion> mm = p_event;
if (moving_selection_attempt && mm.is_valid()) {
Point2 new_pos = mm->get_position();
if (mm->is_alt_pressed()) { // Axis snap key move when alt is pressed
if (Math::abs(new_pos.x - moving_selection_mouse_begin.x) > Math::abs(new_pos.y - moving_selection_mouse_begin.y)) {
new_pos.y = moving_selection_mouse_begin.y;
} else {
new_pos.x = moving_selection_mouse_begin.x;
}
}
if (!moving_selection) {
moving_selection = true;
select_single_attempt = IntPair(-1, -1);
}
if (!read_only) {
float y = (get_size().height / 2.0 - new_pos.y) * timeline_v_zoom + timeline_v_scroll;
float moving_selection_begin_time = ((moving_selection_mouse_begin.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
float new_time = ((new_pos.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
float moving_selection_pivot = moving_selection_from_key != -1 ? animation->track_get_key_time(moving_selection_from_track, moving_selection_from_key) : 0;
float time_delta = new_time - moving_selection_begin_time;
float snapped_time = editor->snap_time(moving_selection_pivot + time_delta);
float time_offset = 0.0;
if (std::abs(moving_selection_offset.x) > CMP_EPSILON || (snapped_time > moving_selection_pivot && time_delta > CMP_EPSILON) || (snapped_time < moving_selection_pivot && time_delta < -CMP_EPSILON)) {
time_offset = snapped_time - moving_selection_pivot;
}
float moving_selection_begin_value;
if (moving_selection_from_key == -1) {
moving_selection_begin_value = (<|fim_suffix|>.height / 2.0 - moving_selection_mouse_begin.y) * timeline_v_zoom + timeline_v_scroll;
} else {
moving_selection_begin_value = animation->bezier_track_get_key_value(moving_selection_from_track, moving_selection_from_key);
}
float y_offset = y - moving_selection_begin_value;
moving_selection_offset = Vector2(time_offset, y_offset);
}
additional_moving_handle_lefts.clear();
additional_moving_handle_rights.clear();
queue_redraw();
}
if (box_selecting_attempt && mm.is_valid()) {
if (!box_selecting) {
box_selecting = true;
box_selecting_add = mm->is_shift_pressed();
}
box_selection_to = mm->get_position();
queue_redraw();
}
if (scaling_selection && mm.is_valid() && !read_only) {
Point2 mp = mm->get_position();
const int handle_length = Math::round((selection_handles_rect.size.width - selection_rect.size.width) / 4.0);
Point2 rel_pos;
		// Calculate the scale according to the distance between the mouse's position (adjusted so that the cursor appears inside the handles)
// and the opposite end of the `selection_rect`.
if (scaling_selection_handles.x != 0) {
if (scaling_selection_handles.x == 1) { // Right Handle
const int handle_adjust = Math::round(mp.x - (scaling_selection_scale.x >= 0 ? selection_rect.position.x : (selection_rect.position.x + selection_rect.size.width)));
mp.x -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.x * SIGN(handle_adjust);
if (editor->is_snap_keys_enabled()) {
mp.x = editor->snap_time((mp.x - limit) / timeline->get_zoom_scale(), true) + timeline->get_value();
mp.x = (mp.x - timeline->get_value()) * timeline->get_zoom_scale() + limit;
}
rel_pos.x = scaling_selection_scale.x >= 0 ? (mp.x - selection_rect.position.x) : selection_rect.position.x + selection_rect.size.width - mp.x;
} else { // Left Handle
const int handle_adjust = Math::round((scaling_selection_scale.x >= 0 ? (selection_rect.position.x + selection_rect.<|fim_middle|>get_size()
|
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
}
scaling_selection = false;
scaling_selection_scale = Vector2(1, 1);
scaling_selection_offset = Vector2();
queue_redraw();
}
Ref<InputEventMouseMotion> mm = p_event;
if (moving_selection_attempt && mm.is_valid()) {
Point2 new_pos = mm->get_position();
if (mm->is_alt_pressed()) { // Axis snap key move when alt is pressed
if (Math::abs(new_pos.x - moving_selection_mouse_begin.x) > Math::abs(new_pos.y - moving_selection_mouse_begin.y)) {
new_pos.y = moving_selection_mouse_begin.y;
} else {
new_pos.x = moving_selection_mouse_begin.x;
}
}
if (!moving_selection) {
moving_selection = true;
select_single_attempt = IntPair(-1, -1);
}
if (!read_only) {
float y = (get_size().height / 2.0 - new_pos.y) * timeline_v_zoom + timeline_v_scroll;
float moving_selection_begin_time = ((moving_selection_mouse_begin.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
float new_time = ((new_pos.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
float moving_selection_pivot = moving_selection_from_key != -1 ? animation->track_get_key_time(moving_selection_from_track, moving_selection_from_key) : 0;
float time_delta = new_time - moving_selection_begin_time;
float snapped_time = editor->snap_time(moving_selection_pivot + time_delta);
float time_offset = 0.0;
if (std::abs(moving_selection_offset.x) > CMP_EPSILON || (snapped_time > moving_selection_pivot && time_delta > CMP_EPSILON) || (snapped_time < moving_selection_pivot && time_delta < -CMP_EPSILON)) {
time_offset = snapped_time - moving_selection_pivot;
}
float moving_selection_begin_value;
if (moving_selection_from_key == -1) {
moving_selection_begin_value = (
|
get_size()
|
.height / 2.0 - moving_selection_mouse_begin.y) * timeline_v_zoom + timeline_v_scroll;
} else {
moving_selection_begin_value = animation->bezier_track_get_key_value(moving_selection_from_track, moving_selection_from_key);
}
float y_offset = y - moving_selection_begin_value;
moving_selection_offset = Vector2(time_offset, y_offset);
}
additional_moving_handle_lefts.clear();
additional_moving_handle_rights.clear();
queue_redraw();
}
if (box_selecting_attempt && mm.is_valid()) {
if (!box_selecting) {
box_selecting = true;
box_selecting_add = mm->is_shift_pressed();
}
box_selection_to = mm->get_position();
queue_redraw();
}
if (scaling_selection && mm.is_valid() && !read_only) {
Point2 mp = mm->get_position();
const int handle_length = Math::round((selection_handles_rect.size.width - selection_rect.size.width) / 4.0);
Point2 rel_pos;
		// Calculate the scale according to the distance between the mouse's position (adjusted so that the cursor appears inside the handles)
// and the opposite end of the `selection_rect`.
if (scaling_selection_handles.x != 0) {
if (scaling_selection_handles.x == 1) { // Right Handle
const int handle_adjust = Math::round(mp.x - (scaling_selection_scale.x >= 0 ? selection_rect.position.x : (selection_rect.position.x + selection_rect.size.width)));
mp.x -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.x * SIGN(handle_adjust);
if (editor->is_snap_keys_enabled()) {
mp.x = editor->snap_time((mp.x - limit) / timeline->get_zoom_scale(), true) + timeline->get_value();
mp.x = (mp.x - timeline->get_value()) * timeline->get_zoom_scale() + limit;
}
rel_pos.x = scaling_selection_scale.x >= 0 ? (mp.x - selection_rect.position.x) : selection_rect.position.x + selection_rect.size.width - mp.x;
} else { // Left Handle
const int handle_adjust = Math::round((scaling_selection_scale.x >= 0 ? (selection_rect.position.x + selection_rect.
|
ast_based
|
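A small standalone illustration of the vertical mapping used above, value = (height / 2 - pixel_y) * v_zoom + v_scroll, together with its inverse; the helper names and sample numbers are assumptions made for the sketch.

// Standalone illustration; only the forward formula mirrors the editor code above.
#include <cassert>

float pixel_to_value(float pixel_y, float panel_height, float v_zoom, float v_scroll) {
    return (panel_height / 2.0f - pixel_y) * v_zoom + v_scroll;
}

float value_to_pixel(float value, float panel_height, float v_zoom, float v_scroll) {
    return panel_height / 2.0f - (value - v_scroll) / v_zoom;
}

int main() {
    const float h = 400.0f, zoom = 0.25f, scroll = 2.0f;
    float v = pixel_to_value(120.0f, h, zoom, scroll);
    // The two helpers are inverses: mapping back lands on the original pixel.
    assert(value_to_pixel(v, h, zoom, scroll) == 120.0f);
    return 0;
}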
<|fim_prefix|>alAvgErr;
cv::Size imageSize;
std::vector<cv::Mat> allFrames;
std::vector<std::vector<cv::Point2f> > imagePoints;
std::vector< std::vector<cv::Point3f> > objectPoints;
std::vector<cv::Mat> allCharucoCorners;
std::vector<cv::Mat> allCharucoIds;
cv::Mat undistMap1, undistMap2;
calibrationData()
{
imageSize = cv::Size(IMAGE_MAX_WIDTH, IMAGE_MAX_HEIGHT);
}
};
struct cameraParameters
{
cv::Mat cameraMatrix;
cv::Mat distCoeffs;
cv::Mat stdDeviations;
double avgError;
cameraParameters(){}
cameraParameters(cv::Mat& _cameraMatrix, cv::Mat& _distCoeffs, cv::Mat& _stdDeviations, double _avgError = 0) :
cameraMatrix(_cameraMatrix), distCoeffs(_distCoeffs), stdDeviations(_stdDeviations), avgError(_avgError)
{}
};
struct captureParameters
{
InputType captureMethod;
InputVideoSource source;
TemplateType board;
cv::Size inputBoardSize;
cv::Size boardSizeInnerCorners; // board size in inner corners for chessboard
cv::Size boardSizeUnits; // board size in squares, circles, etc.
int charucoDictName;
std::string charucoDictFile;
int calibrationStep;
float charucoSquareLength, charucoMarkerSize;
float captureDelay;
float squareSize;
float templDst;
std::string videoFileName;
bool flipVertical;
int camID;
int camBackend;
int fps;
cv::Size cameraResolution;
int maxFramesNum;
int minFramesNum;
bool saveFrames;
float zoom;
bool forceReopen;
captureParameters()
{
calibrationStep = 1;
captureDelay = 500.f;
maxFramesNum = 30;
minFramesNum = 10;
fps = 30;
cameraResolution = cv::Size(IMAGE_MAX_WIDTH, IMAGE_MAX_HEIGHT);
<|fim_suffix|>;
}
};
struct internalParameters
{
double solverEps;
int solverMaxIters;
bool fastSolving;
bool rationalModel;
bool thinPrismModel;
bool tiltedModel;
double filterAlpha;
internalParameters()
{
solverEps = 1e-7;
solverMaxIters = 30;
fastSolving = false;
rationalModel = false;
thinPrismModel = false;
tiltedModel = false;
filterAlpha = 0.1;
}
};
}
#endif
<|fim_middle|>saveFrames = false
|
alAvgErr;
cv::Size imageSize;
std::vector<cv::Mat> allFrames;
std::vector<std::vector<cv::Point2f> > imagePoints;
std::vector< std::vector<cv::Point3f> > objectPoints;
std::vector<cv::Mat> allCharucoCorners;
std::vector<cv::Mat> allCharucoIds;
cv::Mat undistMap1, undistMap2;
calibrationData()
{
imageSize = cv::Size(IMAGE_MAX_WIDTH, IMAGE_MAX_HEIGHT);
}
};
struct cameraParameters
{
cv::Mat cameraMatrix;
cv::Mat distCoeffs;
cv::Mat stdDeviations;
double avgError;
cameraParameters(){}
cameraParameters(cv::Mat& _cameraMatrix, cv::Mat& _distCoeffs, cv::Mat& _stdDeviations, double _avgError = 0) :
cameraMatrix(_cameraMatrix), distCoeffs(_distCoeffs), stdDeviations(_stdDeviations), avgError(_avgError)
{}
};
struct captureParameters
{
InputType captureMethod;
InputVideoSource source;
TemplateType board;
cv::Size inputBoardSize;
cv::Size boardSizeInnerCorners; // board size in inner corners for chessboard
cv::Size boardSizeUnits; // board size in squares, circles, etc.
int charucoDictName;
std::string charucoDictFile;
int calibrationStep;
float charucoSquareLength, charucoMarkerSize;
float captureDelay;
float squareSize;
float templDst;
std::string videoFileName;
bool flipVertical;
int camID;
int camBackend;
int fps;
cv::Size cameraResolution;
int maxFramesNum;
int minFramesNum;
bool saveFrames;
float zoom;
bool forceReopen;
captureParameters()
{
calibrationStep = 1;
captureDelay = 500.f;
maxFramesNum = 30;
minFramesNum = 10;
fps = 30;
cameraResolution = cv::Size(IMAGE_MAX_WIDTH, IMAGE_MAX_HEIGHT);
|
saveFrames = false
|
;
}
};
struct internalParameters
{
double solverEps;
int solverMaxIters;
bool fastSolving;
bool rationalModel;
bool thinPrismModel;
bool tiltedModel;
double filterAlpha;
internalParameters()
{
solverEps = 1e-7;
solverMaxIters = 30;
fastSolving = false;
rationalModel = false;
thinPrismModel = false;
tiltedModel = false;
filterAlpha = 0.1;
}
};
}
#endif
|
ast_based
|
<|fim_prefix|>#pragma once
#include <cstdint>
#include <string>
#include <vector>
namespace hex::trace {
struct StackFrame {
std::string file;
std::string function;
std::uint32_t line;
};
using StackTrace = std::vector<StackFrame>;
void initialize();
struct StackTraceResult {
std::vector<StackFrame> stackFrames;
<|fim_suffix|>
};
StackTraceResult getStackTrace();
[[nodiscard]] std::string demangle(const std::string &mangledName);
}<|fim_middle|>std::string implementationName;
|
#pragma once
#include <cstdint>
#include <string>
#include <vector>
namespace hex::trace {
struct StackFrame {
std::string file;
std::string function;
std::uint32_t line;
};
using StackTrace = std::vector<StackFrame>;
void initialize();
struct StackTraceResult {
std::vector<StackFrame> stackFrames;
|
std::string implementationName;
|
};
StackTraceResult getStackTrace();
[[nodiscard]] std::string demangle(const std::string &mangledName);
}
|
ast_based
|
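A hedged usage sketch for the hex::trace interface above; the include path and the output formatting are assumptions.

// Assumed usage: initialize once, then walk the captured frames.
#include <cstdio>
#include "trace/stacktrace.hpp"  // assumed header name for these declarations

void DumpStackTrace() {
    hex::trace::initialize();
    const auto result = hex::trace::getStackTrace();
    std::printf("backend: %s\n", result.implementationName.c_str());
    for (const auto &frame : result.stackFrames)
        std::printf("  %s (%s:%u)\n",
                    hex::trace::demangle(frame.function).c_str(),
                    frame.file.c_str(), frame.line);
}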
<|fim_prefix|> sigmaMult*mCalibData->stdDeviations.at<double>(1) / mCalibData->cameraMatrix.at<double>(1,1) < relErrEps)
fConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(2) / mCalibData->cameraMatrix.at<double>(0,2) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(3) / mCalibData->cameraMatrix.at<double>(1,2) < relErrEps)
cConfState = true;
for(int i = 0; i < 5; i++)
if(mCalibData->stdDeviations.at<double>(4+i) / fabs(mCalibData->distCoeffs.at<double>(i)) > 1)
dConfState = false;
mConfIntervalsState = fConfState && cConfState && dConfState;
}
if(getFramesNumberState())
        mCoverageQualityState = estimateCoverageQuality() > 1.8;
if (getFramesNumberState() && mNeedTuning) {
if( !(mCalibFlags & cv::CALIB_FIX_ASPECT_RATIO) &&
mCalibData->cameraMatrix.total()) {
double fDiff = fabs(mCalibData->cameraMatrix.at<double>(0,0) -
mCalibData->cameraMatrix.at<double>(1,1));
if (fDiff < 3*mCalibData->stdDeviations.at<double>(0) &&
fDiff < 3*mCalibData->stdDeviations.at<double>(1)) {
mCalibFlags |= cv::CALIB_FIX_ASPECT_RATIO;
mCalibData->cameraMatrix.at<double>(0,0) =
mCalibData->cameraMatrix.at<double>(1,1);
}
}
if(!(mCalibFlags & cv::CALIB_ZERO_TANGENT_DIST)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(2)) < eps &&
fabs(mCalibData->distCoeffs.at<double>(3)) < eps)
mCalibFlags |= cv::CALIB_ZERO_TANGENT_DIST;
}
if(!(mCalibFlags & cv::CALIB_FIX_K1)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(0)) < eps)
mCalibFlags |= cv::CALIB_FIX_K1;
}
<|fim_suffix|> }
if(!(mCalibFlags & cv::CALIB_FIX_K3)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(4)) < eps)
mCalibFlags |= cv::CALIB_FIX_K3;
}
}
}
bool calib::calibController::getCommonCalibrationState() const
{
int rating = (int)getFramesNumberState() + (int)getConfidenceIntrervalsState() +
(int)getRMSState() + (int)mCoverageQualityState;
return rating == 4;
}
bool calib::calibController::getFramesNumberState() const
{
return std::max(mCalibData->imagePoints.size(), mCalibData->allCharucoCorners.size()) > mMinFramesNum;
}
bool calib::calibController::getConfidenceIntrervalsState() const
{
return mConfIntervalsState;
}
bool calib::calibController::getRMSState() const
{
return mCalibData->totalAvgErr < 0.5;
}
int calib::calibController::getNewFlags() const
{
return mCalibFlags;
}
//////////////////// calibDataController
double calib::calibDataController::estimateGridSubsetQuality(size_t excludedIndex)
{
{
int gridSize = 10;
int xGridStep = mCalibData->imageSize.width / gridSize;
int yGridStep = mCalibData->imageSize.height / gridSize;
std::vector<int> pointsInCell(gridSize*gridSize);
std::fill(pointsInCell.begin(), pointsInCell.end(), 0);
for(size_t k = 0; k < mCalibData->imagePoints.size(); k++)
if(k != excludedIndex)
for(std::vector<cv::Point2f>::iterator pointIt = mCalibData->imagePoints[k].begin(); pointIt != mCalibData->imagePoints[k].end(); ++pointIt) {
int i = (int)((*pointIt).x / xGridStep);
int j = (int)((*pointIt).y / yGridStep);
pointsInCell[i*gridSize + j]++;
}
for(size_t k = 0; k < mCalibData->allCharucoCorners.size(); k++)
if(k != excludedIndex)
for(int l = 0; l < mCalibData->allCharucoCorners[k].size[0]; l++) {<|fim_middle|> if(!(mCalibFlags & cv::CALIB_FIX_K2)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(1)) < eps)
mCalibFlags |= cv::CALIB_FIX_K2;
|
sigmaMult*mCalibData->stdDeviations.at<double>(1) / mCalibData->cameraMatrix.at<double>(1,1) < relErrEps)
fConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(2) / mCalibData->cameraMatrix.at<double>(0,2) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(3) / mCalibData->cameraMatrix.at<double>(1,2) < relErrEps)
cConfState = true;
for(int i = 0; i < 5; i++)
if(mCalibData->stdDeviations.at<double>(4+i) / fabs(mCalibData->distCoeffs.at<double>(i)) > 1)
dConfState = false;
mConfIntervalsState = fConfState && cConfState && dConfState;
}
if(getFramesNumberState())
        mCoverageQualityState = estimateCoverageQuality() > 1.8;
if (getFramesNumberState() && mNeedTuning) {
if( !(mCalibFlags & cv::CALIB_FIX_ASPECT_RATIO) &&
mCalibData->cameraMatrix.total()) {
double fDiff = fabs(mCalibData->cameraMatrix.at<double>(0,0) -
mCalibData->cameraMatrix.at<double>(1,1));
if (fDiff < 3*mCalibData->stdDeviations.at<double>(0) &&
fDiff < 3*mCalibData->stdDeviations.at<double>(1)) {
mCalibFlags |= cv::CALIB_FIX_ASPECT_RATIO;
mCalibData->cameraMatrix.at<double>(0,0) =
mCalibData->cameraMatrix.at<double>(1,1);
}
}
if(!(mCalibFlags & cv::CALIB_ZERO_TANGENT_DIST)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(2)) < eps &&
fabs(mCalibData->distCoeffs.at<double>(3)) < eps)
mCalibFlags |= cv::CALIB_ZERO_TANGENT_DIST;
}
if(!(mCalibFlags & cv::CALIB_FIX_K1)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(0)) < eps)
mCalibFlags |= cv::CALIB_FIX_K1;
}
|
if(!(mCalibFlags & cv::CALIB_FIX_K2)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(1)) < eps)
mCalibFlags |= cv::CALIB_FIX_K2;
|
}
if(!(mCalibFlags & cv::CALIB_FIX_K3)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(4)) < eps)
mCalibFlags |= cv::CALIB_FIX_K3;
}
}
}
bool calib::calibController::getCommonCalibrationState() const
{
int rating = (int)getFramesNumberState() + (int)getConfidenceIntrervalsState() +
(int)getRMSState() + (int)mCoverageQualityState;
return rating == 4;
}
bool calib::calibController::getFramesNumberState() const
{
return std::max(mCalibData->imagePoints.size(), mCalibData->allCharucoCorners.size()) > mMinFramesNum;
}
bool calib::calibController::getConfidenceIntrervalsState() const
{
return mConfIntervalsState;
}
bool calib::calibController::getRMSState() const
{
return mCalibData->totalAvgErr < 0.5;
}
int calib::calibController::getNewFlags() const
{
return mCalibFlags;
}
//////////////////// calibDataController
double calib::calibDataController::estimateGridSubsetQuality(size_t excludedIndex)
{
{
int gridSize = 10;
int xGridStep = mCalibData->imageSize.width / gridSize;
int yGridStep = mCalibData->imageSize.height / gridSize;
std::vector<int> pointsInCell(gridSize*gridSize);
std::fill(pointsInCell.begin(), pointsInCell.end(), 0);
for(size_t k = 0; k < mCalibData->imagePoints.size(); k++)
if(k != excludedIndex)
for(std::vector<cv::Point2f>::iterator pointIt = mCalibData->imagePoints[k].begin(); pointIt != mCalibData->imagePoints[k].end(); ++pointIt) {
int i = (int)((*pointIt).x / xGridStep);
int j = (int)((*pointIt).y / yGridStep);
pointsInCell[i*gridSize + j]++;
}
for(size_t k = 0; k < mCalibData->allCharucoCorners.size(); k++)
if(k != excludedIndex)
for(int l = 0; l < mCalibData->allCharucoCorners[k].size[0]; l++) {
|
random
|
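The confidence-interval test above declares a parameter trustworthy when sigmaMult standard deviations stay within 5% of its value (relErrEps). A standalone restatement of that criterion follows; sigmaMult is defined elsewhere in the tool, so the 3.0 default here is an assumption.

// Standalone restatement of the relative-error test used in updateState().
#include <cmath>
#include <cstdio>

bool parameterTrusted(double value, double stdDev,
                      double sigmaMult = 3.0, double relErrEps = 0.05) {
    return sigmaMult * stdDev / std::fabs(value) < relErrEps;
}

int main() {
    // fx = 800 with sigma = 5: 3 * 5 / 800 = 1.875% < 5%, so trusted.
    std::printf("%d\n", parameterTrusted(800.0, 5.0));
    // k1 = 0.01 with sigma = 0.02: the interval dwarfs the value, so not trusted.
    std::printf("%d\n", parameterTrusted(0.01, 0.02));
    return 0;
}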
<|fim_prefix|>re to store the size of the token cache state.
* @return The number of bytes copied. On error, zero is returned, the token cache is set to NULL, and the token cache
* size is set to zero.
*/
uint64_t llmodel_state_get_data(llmodel_model model, uint8_t *state_out, uint64_t state_size,
token_t **input_tokens_out, uint64_t *n_input_tokens);
/**
* Frees the temporary token cache buffer created by a call to llmodel_state_get_data().
* @param input_tokens The token cache buffer.
*/
void llmodel_state_free_input_tokens(token_t *input_tokens);
/**
* Restores the internal state of the model using data from the specified address.
* NOTE: This state data is specific to the type of model you have created.
* @param model A pointer to the llmodel_model instance.
* @param state A pointer to the state data.
* @param state_size The size of the state data.
* @param input_tokens The token cache associated with the saved state.
* @param n_input_tokens The number of tokens in input_tokens.
* @return The number of bytes read, or zero on error.
*/
uint64_t llmodel_state_set_data(llmodel_model model, const uint8_t *state, uint64_t state_size,
const token_t *input_tokens, uint64_t n_input_tokens);
/**
* Generate a response using the model.
* @param model A pointer to the llmodel_model instance.
* @param prompt A string representing the input prompt.
* @param prompt_callback A callback function for handling the processing of prompt.
* @param response_callback A callback function for handling the generated response.
* @param ctx A pointer to the llmodel_prompt_context structure.
* @param error A pointer to a string; will only be set on error.
*/
bool llmodel_prompt(llmodel_model model,
const char *prompt,
llmodel_prompt_callback prompt_callback,
llmodel_response_callback response_callback,
<|fim_suffix|>,
const char **error);
/**
* Generate an embedding using the model.
* NOTE: If given NULL pointers for the model or text, or an empty text, a NULL pointer will be
* returned. Bindings should signal an error when NULL is the return value.
* @param model A pointer to the llmodel_model instance.
* @param texts A pointer to a NULL-terminated array of strings representing the texts to generate an
* embedding for.
* @param embedding_size A pointer to a size_t type that will be set by the call indicating the length
* of the returned floating point array.
* @param prefix The model-specific prefix representing the embedding task, without the trailing colon. NULL for no
* prefix.
 * @param dimensionality The embedding dimension, for use with Matryoshka-capable models. Set to -1 for full-size.
* @param token_count Return location for the number of prompt tokens processed, or NULL.
* @param do_mean True to average multiple embeddings if the text is longer than the model can accept, False to
* truncate.
* @param atlas Try to be fully compatible with the Atlas API. Currently, this means texts longer than 8192 tokens with
* long_text_mode="mean" will raise an error. Disabled by default.
* @param cancel_cb Cancellation callback, or NULL. See the documentation of llmodel_emb_cancel_callback.
* @param error Return location for a malloc()ed string that will be set on error, or NULL.
 * @return A pointer to an array of floating point values passed to the calling method, which is then
 *         responsible for the lifetime of this memory. NULL if an error occurred.
*/
float *llmodel_embed(llmodel_model model, const char **texts, size_t *embedding_size, const char *prefix,
int dimensionality, size_t *token_count, bool do_mean, bool atlas,
llmodel_emb_cancel_callback cancel_cb, const char **error);
/**
* Frees the memory allocated by the llmodel_embedding function.
* @param ptr A pointer to the <|fim_middle|>llmodel_prompt_context *ctx
|
re to store the size of the token cache state.
* @return The number of bytes copied. On error, zero is returned, the token cache is set to NULL, and the token cache
* size is set to zero.
*/
uint64_t llmodel_state_get_data(llmodel_model model, uint8_t *state_out, uint64_t state_size,
token_t **input_tokens_out, uint64_t *n_input_tokens);
/**
* Frees the temporary token cache buffer created by a call to llmodel_state_get_data().
* @param input_tokens The token cache buffer.
*/
void llmodel_state_free_input_tokens(token_t *input_tokens);
/**
* Restores the internal state of the model using data from the specified address.
* NOTE: This state data is specific to the type of model you have created.
* @param model A pointer to the llmodel_model instance.
* @param state A pointer to the state data.
* @param state_size The size of the state data.
* @param input_tokens The token cache associated with the saved state.
* @param n_input_tokens The number of tokens in input_tokens.
* @return The number of bytes read, or zero on error.
*/
uint64_t llmodel_state_set_data(llmodel_model model, const uint8_t *state, uint64_t state_size,
const token_t *input_tokens, uint64_t n_input_tokens);
/**
* Generate a response using the model.
* @param model A pointer to the llmodel_model instance.
* @param prompt A string representing the input prompt.
* @param prompt_callback A callback function for handling the processing of prompt.
* @param response_callback A callback function for handling the generated response.
* @param ctx A pointer to the llmodel_prompt_context structure.
* @param error A pointer to a string; will only be set on error.
*/
bool llmodel_prompt(llmodel_model model,
const char *prompt,
llmodel_prompt_callback prompt_callback,
llmodel_response_callback response_callback,
|
llmodel_prompt_context *ctx
|
,
const char **error);
/**
* Generate an embedding using the model.
* NOTE: If given NULL pointers for the model or text, or an empty text, a NULL pointer will be
* returned. Bindings should signal an error when NULL is the return value.
* @param model A pointer to the llmodel_model instance.
* @param texts A pointer to a NULL-terminated array of strings representing the texts to generate an
* embedding for.
* @param embedding_size A pointer to a size_t type that will be set by the call indicating the length
* of the returned floating point array.
* @param prefix The model-specific prefix representing the embedding task, without the trailing colon. NULL for no
* prefix.
 * @param dimensionality The embedding dimension, for use with Matryoshka-capable models. Set to -1 for full-size.
* @param token_count Return location for the number of prompt tokens processed, or NULL.
* @param do_mean True to average multiple embeddings if the text is longer than the model can accept, False to
* truncate.
* @param atlas Try to be fully compatible with the Atlas API. Currently, this means texts longer than 8192 tokens with
* long_text_mode="mean" will raise an error. Disabled by default.
* @param cancel_cb Cancellation callback, or NULL. See the documentation of llmodel_emb_cancel_callback.
* @param error Return location for a malloc()ed string that will be set on error, or NULL.
 * @return A pointer to an array of floating point values passed to the calling method, which is then
 *         responsible for the lifetime of this memory. NULL if an error occurred.
*/
float *llmodel_embed(llmodel_model model, const char **texts, size_t *embedding_size, const char *prefix,
int dimensionality, size_t *token_count, bool do_mean, bool atlas,
llmodel_emb_cancel_callback cancel_cb, const char **error);
/**
* Frees the memory allocated by the llmodel_embedding function.
* @param ptr A pointer to the
|
ast_based
|
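A hedged sketch of calling llmodel_embed() based only on the declaration above; the header name is an assumption, the model handle is presumed to be created and loaded elsewhere, and the input strings are illustrative.

// Hedged sketch based only on the llmodel_embed() declaration above.
#include <cstddef>
#include <cstdio>
#include "llmodel_c.h"  // assumed header for the declarations above

void EmbedOne(llmodel_model model) {
    const char *texts[] = { "hello world", nullptr };  // NULL-terminated array
    size_t embedding_size = 0;
    const char *error = nullptr;
    float *emb = llmodel_embed(model, texts, &embedding_size,
                               /*prefix=*/nullptr, /*dimensionality=*/-1,
                               /*token_count=*/nullptr, /*do_mean=*/true,
                               /*atlas=*/false, /*cancel_cb=*/nullptr, &error);
    if (emb == nullptr) {
        std::fprintf(stderr, "embedding failed: %s\n", error ? error : "unknown");
        return;
    }
    std::printf("got %zu floats\n", embedding_size);
    // The caller owns the buffer; release it with the free routine declared
    // right after this excerpt (not reproduced here).
}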
<|fim_prefix|>ions/9172)
LLAMA_API struct llama_model_params llama_model_default_params(void);
LLAMA_API struct llama_context_params llama_context_default_params(void);
LLAMA_API struct llama_sampler_chain_params llama_sampler_chain_default_params(void);
LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
// Initialize the llama + ggml backend
// If numa is true, use NUMA optimizations
// Call once at the start of the program
LLAMA_API void llama_backend_init(void);
// Call once at the end of the program - currently only used for MPI
LLAMA_API void llama_backend_free(void);
//optional:
LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa);
// Optional: an auto threadpool gets created in ggml if not passed explicitly
LLAMA_API void llama_attach_threadpool(
struct llama_context * ctx,
ggml_threadpool_t threadpool,
ggml_threadpool_t threadpool_batch);
LLAMA_API void llama_detach_threadpool(struct llama_context * ctx);
DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file(
const char * path_model,
struct llama_model_params params),
"use llama_model_load_from_file instead");
// Load the model from a file
// If the file is split into multiple parts, the file name must follow this pattern: <name>-%05d-of-%05d.gguf
// If the split file name does not follow this pattern, use llama_model_load_from_splits
LLAMA_API struct llama_model * llama_model_load_from_file(
const char * path_model,
struct llama_model_params params);
// Load the model from multiple splits (support custom naming scheme)
// The paths must be in the correct order
LLAMA_API struct llama_model * llama_model_load_from_splits(
const char ** paths,
<|fim_suffix|>,
struct llama_model_params params);
LLAMA_API void llama_model_save_to_file(
const struct llama_model * model,
const char * path_model);
DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
"use llama_model_free instead");
LLAMA_API void llama_model_free(struct llama_model * model);
LLAMA_API struct llama_context * llama_init_from_model(
struct llama_model * model,
struct llama_context_params params);
DEPRECATED(LLAMA_API struct llama_context * llama_new_context_with_model(
struct llama_model * model,
struct llama_context_params params),
"use llama_init_from_model instead");
// Frees all allocated memory
LLAMA_API void llama_free(struct llama_context * ctx);
LLAMA_API int64_t llama_time_us(void);
LLAMA_API size_t llama_max_devices(void);
LLAMA_API size_t llama_max_parallel_sequences(void);
LLAMA_API bool llama_supports_mmap (void);
LLAMA_API bool llama_supports_mlock (void);
LLAMA_API bool llama_supports_gpu_offload(void);
LLAMA_API bool llama_supports_rpc (void);
LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx);
DEPRECATED(LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model), "use llama_model_n_ctx_train instead");
DEPRECATED(LLAMA_API int32_t llama_n_embd (const struct llama_model * model), "use llama_model_n_embd instead");
DEPRECATED(LLAMA_API int32_t llama_n_layer (const struct llama_model * model), "use llama_model_n_layer instead");
DEPRECATED(LLAMA_API int32_t llama_n_head (const struct llama_model * model), "use llama<|fim_middle|>size_t n_paths
|
ions/9172)
LLAMA_API struct llama_model_params llama_model_default_params(void);
LLAMA_API struct llama_context_params llama_context_default_params(void);
LLAMA_API struct llama_sampler_chain_params llama_sampler_chain_default_params(void);
LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
// Initialize the llama + ggml backend
// If numa is true, use NUMA optimizations
// Call once at the start of the program
LLAMA_API void llama_backend_init(void);
// Call once at the end of the program - currently only used for MPI
LLAMA_API void llama_backend_free(void);
//optional:
LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa);
// Optional: an auto threadpool gets created in ggml if not passed explicitly
LLAMA_API void llama_attach_threadpool(
struct llama_context * ctx,
ggml_threadpool_t threadpool,
ggml_threadpool_t threadpool_batch);
LLAMA_API void llama_detach_threadpool(struct llama_context * ctx);
DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file(
const char * path_model,
struct llama_model_params params),
"use llama_model_load_from_file instead");
// Load the model from a file
// If the file is split into multiple parts, the file name must follow this pattern: <name>-%05d-of-%05d.gguf
// If the split file name does not follow this pattern, use llama_model_load_from_splits
LLAMA_API struct llama_model * llama_model_load_from_file(
const char * path_model,
struct llama_model_params params);
// Load the model from multiple splits (support custom naming scheme)
// The paths must be in the correct order
LLAMA_API struct llama_model * llama_model_load_from_splits(
const char ** paths,
|
size_t n_paths
|
,
struct llama_model_params params);
LLAMA_API void llama_model_save_to_file(
const struct llama_model * model,
const char * path_model);
DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
"use llama_model_free instead");
LLAMA_API void llama_model_free(struct llama_model * model);
LLAMA_API struct llama_context * llama_init_from_model(
struct llama_model * model,
struct llama_context_params params);
DEPRECATED(LLAMA_API struct llama_context * llama_new_context_with_model(
struct llama_model * model,
struct llama_context_params params),
"use llama_init_from_model instead");
// Frees all allocated memory
LLAMA_API void llama_free(struct llama_context * ctx);
LLAMA_API int64_t llama_time_us(void);
LLAMA_API size_t llama_max_devices(void);
LLAMA_API size_t llama_max_parallel_sequences(void);
LLAMA_API bool llama_supports_mmap (void);
LLAMA_API bool llama_supports_mlock (void);
LLAMA_API bool llama_supports_gpu_offload(void);
LLAMA_API bool llama_supports_rpc (void);
LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx);
DEPRECATED(LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model), "use llama_model_n_ctx_train instead");
DEPRECATED(LLAMA_API int32_t llama_n_embd (const struct llama_model * model), "use llama_model_n_embd instead");
DEPRECATED(LLAMA_API int32_t llama_n_layer (const struct llama_model * model), "use llama_model_n_layer instead");
DEPRECATED(LLAMA_API int32_t llama_n_head (const struct llama_model * model), "use llama
|
ast_based
|
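A hedged sketch of loading a split model with llama_model_load_from_splits(), using only functions declared above; the file names are illustrative but follow the <name>-%05d-of-%05d.gguf pattern mentioned in the comments.

// Hedged sketch using only functions declared in this header excerpt.
#include <cstdio>
#include "llama.h"

int main() {
    llama_backend_init();
    const char *paths[] = {
        "model-00001-of-00002.gguf",
        "model-00002-of-00002.gguf",
    };
    llama_model_params params = llama_model_default_params();
    llama_model *model = llama_model_load_from_splits(paths, 2, params);
    if (model == nullptr) {
        std::fprintf(stderr, "failed to load split model\n");
        llama_backend_free();
        return 1;
    }
    llama_model_free(model);
    llama_backend_free();
    return 0;
}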
<|fim_prefix|>lements, the intent is that as arrays are
// pushed and popped, the same storage will be reused. This should yield
// efficiencies for heap allocations. For example, in the toolchain we
// frequently have an array per scope, and only add to the current scope's
// array; this allows better reuse when entering and leaving scopes.
template <typename ValueT>
class ArrayStack {
public:
// Pushes a new array onto the stack.
auto PushArray() -> void { array_offsets_.push_back(values_.size()); }
// Pops the top array from the stack.
auto PopArray() -> void {
auto region = array_offsets_.pop_back_val();
values_.truncate(region);
}
// Returns the top array from the stack.
auto PeekArray() const -> llvm::ArrayRef<ValueT> {
CARBON_CHECK(!array_offsets_.empty());
return llvm::ArrayRef(values_).slice(array_offsets_.back());
}
auto PeekArray() -> llvm::MutableArrayRef<ValueT> {
CARBON_CHECK(!array_offsets_.empty());
return llvm::MutableArrayRef(values_).slice(array_offsets_.back());
}
// Returns the array at a specific index.
auto PeekArrayAt(int index) const -> llvm::ArrayRef<ValueT> {
auto ref = llvm::ArrayRef(values_).slice(array_offsets_[index]);
if (index + 1 < static_cast<int>(array_offsets_.size())) {
ref = ref.take_front(array_offsets_[index + 1] - array_offsets_[index]);
}
return ref;
}
// Returns the full set of values on the stack, regardless of whether any
// arrays are pushed.
auto PeekAllValues() const -> llvm::ArrayRef<ValueT> { return values_; }
// Appends a value to the top array on the stack.
auto AppendToTop(const ValueT& value) -> void {
CARBON_CHECK(!array_offsets_.empty(),
"Must call PushArray before AppendToTop.");
values_.push_back(value);
}
// Appends a value to the top array on the stack.
auto AppendToTop(ValueT&& value) -> void {
CARBON_CHECK(!array_offsets_.empty(),
"Must call PushArray before AppendToTop.");
<|fim_suffix|>
}
// Adds multiple values to the top array on the stack.
auto AppendToTop(llvm::ArrayRef<ValueT> values) -> void {
CARBON_CHECK(!array_offsets_.empty(),
"Must call PushArray before PushValues.");
llvm::append_range(values_, values);
}
// Returns the current number of values in all arrays.
auto all_values_size() const -> size_t { return values_.size(); }
// Returns true if the stack has no arrays pushed.
auto empty() const -> bool { return array_offsets_.empty(); }
private:
// For each pushed array, the start index in `values_`.
llvm::SmallVector<int32_t> array_offsets_;
// The full set of elements in all arrays.
llvm::SmallVector<ValueT> values_;
};
} // namespace Carbon
#endif // CARBON_COMMON_ARRAY_STACK_H_
<|fim_middle|>values_.push_back(std::move(value));
|
lements, the intent is that as arrays are
// pushed and popped, the same storage will be reused. This should yield
// efficiencies for heap allocations. For example, in the toolchain we
// frequently have an array per scope, and only add to the current scope's
// array; this allows better reuse when entering and leaving scopes.
template <typename ValueT>
class ArrayStack {
public:
// Pushes a new array onto the stack.
auto PushArray() -> void { array_offsets_.push_back(values_.size()); }
// Pops the top array from the stack.
auto PopArray() -> void {
auto region = array_offsets_.pop_back_val();
values_.truncate(region);
}
// Returns the top array from the stack.
auto PeekArray() const -> llvm::ArrayRef<ValueT> {
CARBON_CHECK(!array_offsets_.empty());
return llvm::ArrayRef(values_).slice(array_offsets_.back());
}
auto PeekArray() -> llvm::MutableArrayRef<ValueT> {
CARBON_CHECK(!array_offsets_.empty());
return llvm::MutableArrayRef(values_).slice(array_offsets_.back());
}
// Returns the array at a specific index.
auto PeekArrayAt(int index) const -> llvm::ArrayRef<ValueT> {
auto ref = llvm::ArrayRef(values_).slice(array_offsets_[index]);
if (index + 1 < static_cast<int>(array_offsets_.size())) {
ref = ref.take_front(array_offsets_[index + 1] - array_offsets_[index]);
}
return ref;
}
// Returns the full set of values on the stack, regardless of whether any
// arrays are pushed.
auto PeekAllValues() const -> llvm::ArrayRef<ValueT> { return values_; }
// Appends a value to the top array on the stack.
auto AppendToTop(const ValueT& value) -> void {
CARBON_CHECK(!array_offsets_.empty(),
"Must call PushArray before AppendToTop.");
values_.push_back(value);
}
// Appends a value to the top array on the stack.
auto AppendToTop(ValueT&& value) -> void {
CARBON_CHECK(!array_offsets_.empty(),
"Must call PushArray before AppendToTop.");
|
values_.push_back(std::move(value));
|
}
// Adds multiple values to the top array on the stack.
auto AppendToTop(llvm::ArrayRef<ValueT> values) -> void {
CARBON_CHECK(!array_offsets_.empty(),
"Must call PushArray before PushValues.");
llvm::append_range(values_, values);
}
// Returns the current number of values in all arrays.
auto all_values_size() const -> size_t { return values_.size(); }
// Returns true if the stack has no arrays pushed.
auto empty() const -> bool { return array_offsets_.empty(); }
private:
// For each pushed array, the start index in `values_`.
llvm::SmallVector<int32_t> array_offsets_;
// The full set of elements in all arrays.
llvm::SmallVector<ValueT> values_;
};
} // namespace Carbon
#endif // CARBON_COMMON_ARRAY_STACK_H_
|
ast_based
|
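A small usage sketch for ArrayStack that mirrors the per-scope pattern described in its class comment; the include path is an assumption.

// Usage sketch for ArrayStack; include path is assumed.
#include "common/array_stack.h"

namespace Carbon {

auto CollectPerScope() -> void {
  ArrayStack<int> stack;
  stack.PushArray();  // outer scope
  stack.AppendToTop(1);
  stack.PushArray();  // inner scope; storage is reused across pop/push cycles
  stack.AppendToTop(2);
  stack.AppendToTop(3);
  CARBON_CHECK(stack.PeekArray().size() == 2);
  stack.PopArray();   // drops only the inner scope's values
  CARBON_CHECK(stack.PeekArray().size() == 1);
  stack.PopArray();
  CARBON_CHECK(stack.empty());
}

}  // namespace Carbon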
<|fim_prefix|>
Vector2 insert_at_pos;
typedef Pair<int, int> IntPair;
bool moving_selection_attempt = false;
bool moving_inserted_key = false;
Point2 moving_selection_mouse_begin;
IntPair select_single_attempt;
bool moving_selection = false;
int moving_selection_from_key = 0;
int moving_selection_from_track = 0;
Vector2 moving_selection_offset;
bool box_selecting_attempt = false;
bool box_selecting = false;
bool box_selecting_add = false;
Vector2 box_selection_from;
Vector2 box_selection_to;
Rect2 selection_rect;
Rect2 selection_handles_rect;
bool scaling_selection = false;
Vector2i scaling_selection_handles;
Vector2 scaling_selection_scale = Vector2(1, 1);
Vector2 scaling_selection_offset;
Point2 scaling_selection_pivot;
int moving_handle = 0; //0 no move -1 or +1 out, 2 both (drawing only)
int moving_handle_key = 0;
int moving_handle_track = 0;
Vector2 moving_handle_left;
Vector2 moving_handle_right;
int moving_handle_mode = 0; // value from Animation::HandleMode
struct PairHasher {
static _FORCE_INLINE_ uint32_t hash(const Pair<int, int> &p_value) {
int32_t hash = 23;
hash = hash * 31 * hash_one_uint64(p_value.first);
hash = hash * 31 * hash_one_uint64(p_value.second);
return hash;
}
};
HashMap<Pair<int, int>, Vector2, PairHasher> additional_moving_handle_lefts;
HashMap<Pair<int, int>, Vector2, PairHasher> additional_moving_handle_rights;
void _clear_selection();
void _clear_selection_for_anim(const Ref<Animation> &p_anim);
void _select_at_anim(const Ref<Animation> &p_anim, int p_track, real_t p_pos, bool p_single);
bool _try_select_at_ui_pos(const Point2 &p_pos, bool p_aggregate, bool p_deselectable);
void _change_selected_keys_handle_mode(Animation::HandleMode p_mode, bool p_auto = false);
Vector2 menu_insert_key;
struct AnimMoveRestore {
int track = 0;
double time = 0;
Variant key;
real_t transition = 0;
};
AnimationTrackEditor *editor = nullptr;
struct EditPoint {
Rect2 point_rect;<|fim_suffix|> Vector<EditPoint> edit_points;
struct PairCompare {
bool operator()(const IntPair &lh, const IntPair &rh) {
if (lh.first == rh.first) {
return lh.second < rh.second;
} else {
return lh.first < rh.first;
}
}
};
typedef RBSet<IntPair, PairCompare> SelectionSet;
SelectionSet selection;
Ref<ViewPanner> panner;
void _pan_callback(Vector2 p_scroll_vec, Ref<InputEvent> p_event);
void _zoom_callback(float p_zoom_factor, Vector2 p_origin, Ref<InputEvent> p_event);
void _draw_line_clipped(const Vector2 &p_from, const Vector2 &p_to, const Color &p_color, int p_clip_left, int p_clip_right);
void _draw_track(int p_track, const Color &p_color);
float _bezier_h_to_pixel(float p_h);
void _zoom_vertically(real_t p_minimum_value, real_t p_maximum_value);
protected:
static void _bind_methods();
void _notification(int p_what);
public:
static float get_bezier_key_value(Array p_bezier_key_array);
virtual String get_tooltip(const Point2 &p_pos) const override;
Ref<Animation> get_animation() const;
void set_animation_and_track(const Ref<Animation> &p_animation, int p_track, bool p_read_only);
virtual Size2 get_minimum_size() const override;
virtual CursorShape get_cursor_shape(const Point2 &p_pos) const override;
void set_timeline(AnimationTimelineEdit *p_timeline);
void set_editor(AnimationTrackEditor *p_editor);
void set_root(Node *p_root);
void set_filtered(bool p_filtered);
void auto_fit_vertically();
void set_play_position(real_t p_pos);
void update_play_position();
void duplicate_selected_keys(real_t p_ofs, bool p_ofs_valid);
void copy_selected_keys(bool p_cut);
void paste_keys(real_t p_ofs, bool p_ofs_valid);
void delete_selection();
<|fim_middle|> Rect2 in_rect;
Rect2 out_rect;
int track = 0;
int key = 0;
};
|
Vector2 insert_at_pos;
typedef Pair<int, int> IntPair;
bool moving_selection_attempt = false;
bool moving_inserted_key = false;
Point2 moving_selection_mouse_begin;
IntPair select_single_attempt;
bool moving_selection = false;
int moving_selection_from_key = 0;
int moving_selection_from_track = 0;
Vector2 moving_selection_offset;
bool box_selecting_attempt = false;
bool box_selecting = false;
bool box_selecting_add = false;
Vector2 box_selection_from;
Vector2 box_selection_to;
Rect2 selection_rect;
Rect2 selection_handles_rect;
bool scaling_selection = false;
Vector2i scaling_selection_handles;
Vector2 scaling_selection_scale = Vector2(1, 1);
Vector2 scaling_selection_offset;
Point2 scaling_selection_pivot;
	int moving_handle = 0; // 0: not moving, -1: in handle, +1: out handle, 2: both (drawing only).
int moving_handle_key = 0;
int moving_handle_track = 0;
Vector2 moving_handle_left;
Vector2 moving_handle_right;
int moving_handle_mode = 0; // value from Animation::HandleMode
struct PairHasher {
static _FORCE_INLINE_ uint32_t hash(const Pair<int, int> &p_value) {
int32_t hash = 23;
hash = hash * 31 * hash_one_uint64(p_value.first);
hash = hash * 31 * hash_one_uint64(p_value.second);
return hash;
}
};
HashMap<Pair<int, int>, Vector2, PairHasher> additional_moving_handle_lefts;
HashMap<Pair<int, int>, Vector2, PairHasher> additional_moving_handle_rights;
void _clear_selection();
void _clear_selection_for_anim(const Ref<Animation> &p_anim);
void _select_at_anim(const Ref<Animation> &p_anim, int p_track, real_t p_pos, bool p_single);
bool _try_select_at_ui_pos(const Point2 &p_pos, bool p_aggregate, bool p_deselectable);
void _change_selected_keys_handle_mode(Animation::HandleMode p_mode, bool p_auto = false);
Vector2 menu_insert_key;
struct AnimMoveRestore {
int track = 0;
double time = 0;
Variant key;
real_t transition = 0;
};
AnimationTrackEditor *editor = nullptr;
struct EditPoint {
Rect2 point_rect;
|
Rect2 in_rect;
Rect2 out_rect;
int track = 0;
int key = 0;
};
|
Vector<EditPoint> edit_points;
struct PairCompare {
bool operator()(const IntPair &lh, const IntPair &rh) {
if (lh.first == rh.first) {
return lh.second < rh.second;
} else {
return lh.first < rh.first;
}
}
};
typedef RBSet<IntPair, PairCompare> SelectionSet;
SelectionSet selection;
Ref<ViewPanner> panner;
void _pan_callback(Vector2 p_scroll_vec, Ref<InputEvent> p_event);
void _zoom_callback(float p_zoom_factor, Vector2 p_origin, Ref<InputEvent> p_event);
void _draw_line_clipped(const Vector2 &p_from, const Vector2 &p_to, const Color &p_color, int p_clip_left, int p_clip_right);
void _draw_track(int p_track, const Color &p_color);
float _bezier_h_to_pixel(float p_h);
void _zoom_vertically(real_t p_minimum_value, real_t p_maximum_value);
protected:
static void _bind_methods();
void _notification(int p_what);
public:
static float get_bezier_key_value(Array p_bezier_key_array);
virtual String get_tooltip(const Point2 &p_pos) const override;
Ref<Animation> get_animation() const;
void set_animation_and_track(const Ref<Animation> &p_animation, int p_track, bool p_read_only);
virtual Size2 get_minimum_size() const override;
virtual CursorShape get_cursor_shape(const Point2 &p_pos) const override;
void set_timeline(AnimationTimelineEdit *p_timeline);
void set_editor(AnimationTrackEditor *p_editor);
void set_root(Node *p_root);
void set_filtered(bool p_filtered);
void auto_fit_vertically();
void set_play_position(real_t p_pos);
void update_play_position();
void duplicate_selected_keys(real_t p_ofs, bool p_ofs_valid);
void copy_selected_keys(bool p_cut);
void paste_keys(real_t p_ofs, bool p_ofs_valid);
void delete_selection();
|
random
|
<|fim_prefix|> Ref<Texture2D> visibility_hidden = get_editor_theme_icon(SNAME("GuiVisibilityHidden"));
float visibility_hpos = lock_hpos - h_separation - visibility_visible->get_width();
Ref<Texture2D> solo = get_editor_theme_icon(SNAME("AudioBusSolo"));
float solo_hpos = visibility_hpos - h_separation - solo->get_width();
float buttons_width = remove->get_width() + lock->get_width() + visibility_visible->get_width() + solo->get_width() + h_separation * 3;
for (int i = 0; i < tracks.size(); ++i) {
// Related track titles.
int current_track = tracks[i];
String path = String(animation->track_get_path(current_track));
path = path.replace_first(base_path, "");
Color cc = color;
TextLine text_buf = TextLine(path, font, font_size);
text_buf.set_width(limit - margin - buttons_width - h_separation * 2);
Rect2 rect = Rect2(margin, vofs, solo_hpos - h_separation - solo->get_width(), text_buf.get_size().y + v_separation);
cc.a *= 0.7;
float h;
if (path.ends_with(":x")) {
h = 0;
} else if (path.ends_with(":y")) {
h = 0.33f;
} else if (path.ends_with(":z")) {
h = 0.66f;
} else {
uint32_t hash = path.hash();
hash = ((hash >> 16) ^ hash) * 0x45d9f3b;
hash = ((hash >> 16) ^ hash) * 0x45d9f3b;
hash = (hash >> 16) ^ hash;
h = (hash % 65535) / 65536.0;
}
if (current_track != selected_track) {
Color track_color;
if (locked_tracks.has(current_track)) {
track_color.set_hsv(h, 0, 0.4);
} else {
track_color.set_hsv(h, 0.2, 0.8);
}
track_color.a = 0.5;
draw_rect(Rect2(0, vofs, margin - h_separation, text_buf.get_size().y * 0.8), track_color);
subtrack_colors[current_track] = track_color;
subtracks[current_track] = rect;
} else {
draw_rect(rect, track_focus_color);
if (locked_tracks.has(selected_track)) {<|fim_suffix|> selected_track_color.set_hsv(h, 0.8, 0.8);
}
}
Vector2 string_pos = Point2(margin + h_separation, vofs);
text_buf.draw(get_canvas_item(), string_pos, cc);
float icon_start_height = vofs + rect.size.y / 2.0;
Rect2 remove_rect = Rect2(remove_hpos, icon_start_height - remove->get_height() / 2.0, remove->get_width(), remove->get_height());
if (read_only) {
draw_texture(remove, remove_rect.position, dc);
} else {
draw_texture(remove, remove_rect.position);
}
Rect2 lock_rect = Rect2(lock_hpos, icon_start_height - lock->get_height() / 2.0, lock->get_width(), lock->get_height());
if (locked_tracks.has(current_track)) {
draw_texture(lock, lock_rect.position);
} else {
draw_texture(unlock, lock_rect.position);
}
Rect2 visible_rect = Rect2(visibility_hpos, icon_start_height - visibility_visible->get_height() / 2.0, visibility_visible->get_width(), visibility_visible->get_height());
if (hidden_tracks.has(current_track)) {
draw_texture(visibility_hidden, visible_rect.position);
} else {
draw_texture(visibility_visible, visible_rect.position);
}
Rect2 solo_rect = Rect2(solo_hpos, icon_start_height - solo->get_height() / 2.0, solo->get_width(), solo->get_height());
draw_texture(solo, solo_rect.position);
RBMap<int, Rect2> track_icons;
track_icons[REMOVE_ICON] = remove_rect;
track_icons[LOCK_ICON] = lock_rect;
track_icons[VISIBILITY_ICON] = visible_rect;
track_icons[SOLO_ICON] = solo_rect;
subtrack_icons[current_track] = track_icons;
vofs += text_buf.get_size().y + v_separation;
track_v_scroll_max += text_buf.get_size().y + v_separation;
}
}
const Color accent = get_theme_color(SNAME("accent_color"), EditorStringName(Editor));
// Guides.
{
float min_left_scale = font->get_height(font_size) + v_separation;
float scale = (min_left_scale * 2) * timeline_v_zoom;<|fim_middle|> selected_track_color.set_hsv(h, 0.0, 0.4);
} else {
|
Ref<Texture2D> visibility_hidden = get_editor_theme_icon(SNAME("GuiVisibilityHidden"));
float visibility_hpos = lock_hpos - h_separation - visibility_visible->get_width();
Ref<Texture2D> solo = get_editor_theme_icon(SNAME("AudioBusSolo"));
float solo_hpos = visibility_hpos - h_separation - solo->get_width();
float buttons_width = remove->get_width() + lock->get_width() + visibility_visible->get_width() + solo->get_width() + h_separation * 3;
for (int i = 0; i < tracks.size(); ++i) {
// Related track titles.
int current_track = tracks[i];
String path = String(animation->track_get_path(current_track));
path = path.replace_first(base_path, "");
Color cc = color;
TextLine text_buf = TextLine(path, font, font_size);
text_buf.set_width(limit - margin - buttons_width - h_separation * 2);
Rect2 rect = Rect2(margin, vofs, solo_hpos - h_separation - solo->get_width(), text_buf.get_size().y + v_separation);
cc.a *= 0.7;
float h;
if (path.ends_with(":x")) {
h = 0;
} else if (path.ends_with(":y")) {
h = 0.33f;
} else if (path.ends_with(":z")) {
h = 0.66f;
} else {
uint32_t hash = path.hash();
hash = ((hash >> 16) ^ hash) * 0x45d9f3b;
hash = ((hash >> 16) ^ hash) * 0x45d9f3b;
hash = (hash >> 16) ^ hash;
h = (hash % 65535) / 65536.0;
}
if (current_track != selected_track) {
Color track_color;
if (locked_tracks.has(current_track)) {
track_color.set_hsv(h, 0, 0.4);
} else {
track_color.set_hsv(h, 0.2, 0.8);
}
track_color.a = 0.5;
draw_rect(Rect2(0, vofs, margin - h_separation, text_buf.get_size().y * 0.8), track_color);
subtrack_colors[current_track] = track_color;
subtracks[current_track] = rect;
} else {
draw_rect(rect, track_focus_color);
if (locked_tracks.has(selected_track)) {
|
selected_track_color.set_hsv(h, 0.0, 0.4);
} else {
|
selected_track_color.set_hsv(h, 0.8, 0.8);
}
}
Vector2 string_pos = Point2(margin + h_separation, vofs);
text_buf.draw(get_canvas_item(), string_pos, cc);
float icon_start_height = vofs + rect.size.y / 2.0;
Rect2 remove_rect = Rect2(remove_hpos, icon_start_height - remove->get_height() / 2.0, remove->get_width(), remove->get_height());
if (read_only) {
draw_texture(remove, remove_rect.position, dc);
} else {
draw_texture(remove, remove_rect.position);
}
Rect2 lock_rect = Rect2(lock_hpos, icon_start_height - lock->get_height() / 2.0, lock->get_width(), lock->get_height());
if (locked_tracks.has(current_track)) {
draw_texture(lock, lock_rect.position);
} else {
draw_texture(unlock, lock_rect.position);
}
Rect2 visible_rect = Rect2(visibility_hpos, icon_start_height - visibility_visible->get_height() / 2.0, visibility_visible->get_width(), visibility_visible->get_height());
if (hidden_tracks.has(current_track)) {
draw_texture(visibility_hidden, visible_rect.position);
} else {
draw_texture(visibility_visible, visible_rect.position);
}
Rect2 solo_rect = Rect2(solo_hpos, icon_start_height - solo->get_height() / 2.0, solo->get_width(), solo->get_height());
draw_texture(solo, solo_rect.position);
RBMap<int, Rect2> track_icons;
track_icons[REMOVE_ICON] = remove_rect;
track_icons[LOCK_ICON] = lock_rect;
track_icons[VISIBILITY_ICON] = visible_rect;
track_icons[SOLO_ICON] = solo_rect;
subtrack_icons[current_track] = track_icons;
vofs += text_buf.get_size().y + v_separation;
track_v_scroll_max += text_buf.get_size().y + v_separation;
}
}
const Color accent = get_theme_color(SNAME("accent_color"), EditorStringName(Editor));
// Guides.
{
float min_left_scale = font->get_height(font_size) + v_separation;
float scale = (min_left_scale * 2) * timeline_v_zoom;
|
random
|
<|fim_prefix|>#include "arg.h"
#include "common.h"
#include "log.h"
#include "llama.h"
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>
<|fim_suffix|>
int main(int argc, char ** argv) {
common_params params;
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_BENCH, print_usage)) {
return 1;
}
common_init();
int is_pp_shared = params.is_pp_shared;
std::vector<int> n_pp = params.n_pp;
std::vector<int> n_tg = params.n_tg;
std::vector<int> n_pl = params.n_pl;
// init LLM
llama_backend_init();
llama_numa_init(params.numa);
// initialize the model
llama_model_params model_params = common_model_params_to_llama(params);
llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params);
if (model == NULL) {
fprintf(stderr , "%s: error: unable to load model\n" , __func__);
return 1;
}
llama_context_params ctx_params = common_context_params_to_llama(params);
// ensure enough sequences are available
ctx_params.n_seq_max = n_pl.empty() ? 1 : *std::max_element(n_pl.begin(), n_pl.end());
llama_context * ctx = llama_init_from_model(model, ctx_params);
if (ctx == NULL) {
fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__);
return 1;
}
const llama_vocab * vocab = llama_model_get_vocab(model);
const int32_t n_vocab = llama_vocab_n_tokens(vocab);
const auto get_token_rand = [n_vocab]() -> llama_token {
return std::rand() % n_vocab;
};
auto * mem = llama_get_memory(ctx);
const int32_t n_kv_max = llama_n_ctx(ctx);
llama_batch batch = llama_batch_init(n_kv_max, 0, 1);
// decode in batches of ctx_params.n_batch tokens
auto decode_helper = [](llama_context * ctx, llama_batch & batch, int32_t n_batch, bool synchronize) {
for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) {
const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
llama_batch batch_view = {
n_tokens,
batch.token + i,
<|fim_middle|>static void print_usage(int, char ** argv) {
LOG("\nexample usage:\n");
LOG("\n %s -m model.gguf -c 2048 -b 2048 -ub 512 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 [-pps]\n", argv[0]);
LOG("\n");
}
|
#include "arg.h"
#include "common.h"
#include "log.h"
#include "llama.h"
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>
|
static void print_usage(int, char ** argv) {
LOG("\nexample usage:\n");
LOG("\n %s -m model.gguf -c 2048 -b 2048 -ub 512 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 [-pps]\n", argv[0]);
LOG("\n");
}
|
int main(int argc, char ** argv) {
common_params params;
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_BENCH, print_usage)) {
return 1;
}
common_init();
int is_pp_shared = params.is_pp_shared;
std::vector<int> n_pp = params.n_pp;
std::vector<int> n_tg = params.n_tg;
std::vector<int> n_pl = params.n_pl;
// init LLM
llama_backend_init();
llama_numa_init(params.numa);
// initialize the model
llama_model_params model_params = common_model_params_to_llama(params);
llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params);
if (model == NULL) {
fprintf(stderr , "%s: error: unable to load model\n" , __func__);
return 1;
}
llama_context_params ctx_params = common_context_params_to_llama(params);
// ensure enough sequences are available
ctx_params.n_seq_max = n_pl.empty() ? 1 : *std::max_element(n_pl.begin(), n_pl.end());
llama_context * ctx = llama_init_from_model(model, ctx_params);
if (ctx == NULL) {
fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__);
return 1;
}
const llama_vocab * vocab = llama_model_get_vocab(model);
const int32_t n_vocab = llama_vocab_n_tokens(vocab);
const auto get_token_rand = [n_vocab]() -> llama_token {
return std::rand() % n_vocab;
};
auto * mem = llama_get_memory(ctx);
const int32_t n_kv_max = llama_n_ctx(ctx);
llama_batch batch = llama_batch_init(n_kv_max, 0, 1);
// decode in batches of ctx_params.n_batch tokens
auto decode_helper = [](llama_context * ctx, llama_batch & batch, int32_t n_batch, bool synchronize) {
for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) {
const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
llama_batch batch_view = {
n_tokens,
batch.token + i,
|
ast_based
|
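The decode_helper lambda above walks the batch in windows of at most n_batch tokens, with the final window clamped to whatever remains. The same chunking pattern in isolation, with hypothetical names and no llama.cpp dependency, looks like this:

#include <algorithm>
#include <cstdio>
#include <vector>

// Visits `items` in windows of at most `chunk` elements; the tail is clamped.
static void process_in_chunks(const std::vector<int> & items, int chunk) {
    const int total = (int) items.size();
    for (int i = 0; i < total; i += chunk) {
        const int n = std::min(chunk, total - i);
        // A real decoder would submit the view items[i .. i + n) here.
        std::printf("window at %d, size %d\n", i, n);
    }
}

int main() {
    std::vector<int> tokens(10, 0);
    process_in_chunks(tokens, 4); // Prints windows of size 4, 4, 2.
    return 0;
}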
<|fim_prefix|> ofs += timeline_v_scroll;
int iv = int(ofs / scale);
if (ofs < 0) {
iv -= 1;
}
if (!first && iv != prev_iv) {
Color lc = h_line_color;
lc.a *= 0.5;
draw_line(Point2(limit, i), Point2(right_limit, i), lc, Math::round(EDSCALE));
Color c = color;
c.a *= 0.5;
draw_string(font, Point2(limit + 8, i - 2), TS->format_number(rtos(Math::snapped((iv + 1) * scale, step))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, c);
}
first = false;
prev_iv = iv;
}
}
// Draw other curves.
{
float scale = timeline->get_zoom_scale();
Ref<Texture2D> point = get_editor_theme_icon(SNAME("KeyValue"));
for (const KeyValue<int, Color> &E : subtrack_colors) {
if (hidden_tracks.has(E.key)) {
continue;
}
_draw_track(E.key, E.value);
for (int i = 0; i < animation->track_get_key_count(E.key); i++) {
float offset = animation->track_get_key_time(E.key, i);
float value = animation->bezier_track_get_key_value(E.key, i);
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (pos.x >= limit && pos.x <= right_limit) {
draw_texture(point, pos - point->get_size() / 2.0, E.value);
}
}
}
if (track_count > 0 && !hidden_tracks.has(selected_track)) {
// Draw edited curve.
_draw_track(selected_track, selected_track_color);
}
}
const bool draw_selection_handles = selection.size() > 1;
LocalVector<Point2> selected_pos;
// Draw editor handles.
{
edit_points.clear();
float scale = timeline->get_zoom_scale();
for (int i = 0; i < track_count; ++i) {
bool draw_track = _is_track_curves_displayed(i) && !locked_tracks.has(i);
if (!draw_selection_handles && !draw_track) {
continue;
}
int key_count = animation->track_get_key_count(i);
for (int j = 0; j < key_count; ++j) {
float offset = animation->track_get_key_time(i, j);
<|fim_suffix|>
bool is_selected = selection.has(IntPair(i, j));
if (is_selected) {
if (moving_selection) {
offset += moving_selection_offset.x;
value += moving_selection_offset.y;
} else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
value += -scaling_selection_offset.y + (value - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (draw_selection_handles && is_selected) {
selected_pos.push_back(pos);
}
if (!draw_track) {
continue;
}
Vector2 in_vec = animation->bezier_track_get_key_in_handle(i, j);
Vector2 out_vec = animation->bezier_track_get_key_out_handle(i, j);
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
in_vec = moving_handle_left;
}
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
out_vec = moving_handle_right;
}
if (moving_inserted_key && moving_selection_from_key == j) {
Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(i, j);
if (handle_mode != Animation::HANDLE_MODE_FREE) {
int key_prev = 0;
int key_next = moving_selection_from_key;
for (int k = 0; k < key_count; k++) {
if (k == moving_selection_from_key) {
continue;
}
if (animation->track_get_key_time(i, k) < offset) {
key_prev = k;
} else {
key_next = k;
break;
}
}
float prev_time = offset;
float prev_value = value;
if (key_prev != moving_selection_from_key) {
prev_time = animation->track_get_key_time(i, key_prev);
prev_value = anima<|fim_middle|>float value = animation->bezier_track_get_key_value(i, j);
|
ofs += timeline_v_scroll;
int iv = int(ofs / scale);
if (ofs < 0) {
iv -= 1;
}
if (!first && iv != prev_iv) {
Color lc = h_line_color;
lc.a *= 0.5;
draw_line(Point2(limit, i), Point2(right_limit, i), lc, Math::round(EDSCALE));
Color c = color;
c.a *= 0.5;
draw_string(font, Point2(limit + 8, i - 2), TS->format_number(rtos(Math::snapped((iv + 1) * scale, step))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, c);
}
first = false;
prev_iv = iv;
}
}
// Draw other curves.
{
float scale = timeline->get_zoom_scale();
Ref<Texture2D> point = get_editor_theme_icon(SNAME("KeyValue"));
for (const KeyValue<int, Color> &E : subtrack_colors) {
if (hidden_tracks.has(E.key)) {
continue;
}
_draw_track(E.key, E.value);
for (int i = 0; i < animation->track_get_key_count(E.key); i++) {
float offset = animation->track_get_key_time(E.key, i);
float value = animation->bezier_track_get_key_value(E.key, i);
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (pos.x >= limit && pos.x <= right_limit) {
draw_texture(point, pos - point->get_size() / 2.0, E.value);
}
}
}
if (track_count > 0 && !hidden_tracks.has(selected_track)) {
// Draw edited curve.
_draw_track(selected_track, selected_track_color);
}
}
const bool draw_selection_handles = selection.size() > 1;
LocalVector<Point2> selected_pos;
// Draw editor handles.
{
edit_points.clear();
float scale = timeline->get_zoom_scale();
for (int i = 0; i < track_count; ++i) {
bool draw_track = _is_track_curves_displayed(i) && !locked_tracks.has(i);
if (!draw_selection_handles && !draw_track) {
continue;
}
int key_count = animation->track_get_key_count(i);
for (int j = 0; j < key_count; ++j) {
float offset = animation->track_get_key_time(i, j);
|
float value = animation->bezier_track_get_key_value(i, j);
|
bool is_selected = selection.has(IntPair(i, j));
if (is_selected) {
if (moving_selection) {
offset += moving_selection_offset.x;
value += moving_selection_offset.y;
} else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
value += -scaling_selection_offset.y + (value - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (draw_selection_handles && is_selected) {
selected_pos.push_back(pos);
}
if (!draw_track) {
continue;
}
Vector2 in_vec = animation->bezier_track_get_key_in_handle(i, j);
Vector2 out_vec = animation->bezier_track_get_key_out_handle(i, j);
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
in_vec = moving_handle_left;
}
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
out_vec = moving_handle_right;
}
if (moving_inserted_key && moving_selection_from_key == j) {
Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(i, j);
if (handle_mode != Animation::HANDLE_MODE_FREE) {
int key_prev = 0;
int key_next = moving_selection_from_key;
for (int k = 0; k < key_count; k++) {
if (k == moving_selection_from_key) {
continue;
}
if (animation->track_get_key_time(i, k) < offset) {
key_prev = k;
} else {
key_next = k;
break;
}
}
float prev_time = offset;
float prev_value = value;
if (key_prev != moving_selection_from_key) {
prev_time = animation->track_get_key_time(i, key_prev);
prev_value = anima
|
ast_based
|
<|fim_prefix|>t);
_draw_line_clipped(pos, pos_out, accent, limit, right_limit);
}
EditPoint ep;
ep.track = i;
ep.key = j;
if (pos.x >= limit && pos.x <= right_limit) {
ep.point_rect.position = (pos - bezier_icon->get_size() / 2.0).floor();
ep.point_rect.size = bezier_icon->get_size();
if (is_selected) {
draw_texture(selected_icon, ep.point_rect.position);
draw_string(font, ep.point_rect.position + Vector2(8, -font->get_height(font_size) - 8), TTR("Time:") + " " + TS->format_number(rtos(Math::snapped(offset, 0.0001))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, accent);
draw_string(font, ep.point_rect.position + Vector2(8, -8), TTR("Value:") + " " + TS->format_number(rtos(Math::snapped(value, 0.001))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, accent);
} else {
Color track_color = Color(1, 1, 1, 1);
if (i != selected_track) {
track_color = subtrack_colors[i];
}
draw_texture(bezier_icon, ep.point_rect.position, track_color);
}
ep.point_rect = ep.point_rect.grow(ep.point_rect.size.width * 0.5);
}
ep.point_rect = ep.point_rect.grow(ep.point_rect.size.width * 0.5);
if (i == selected_track || is_selected) {
if (animation->bezier_track_get_key_handle_mode(i, j) != Animation::HANDLE_MODE_LINEAR) {
if (pos_in.x >= limit && pos_in.x <= right_limit) {
ep.in_rect.position = (pos_in - bezier_handle_icon->get_size() / 2.0).floor();
ep.in_rect.size = bezier_handle_icon->get_size();
draw_texture(bezier_handle_icon, ep.in_rect.position);
ep.in_rect = ep.in_rect.grow(ep.in_rect.size.width * 0.5);
}
if (pos_out.x >= limit && pos_out.x <= right_limit) {
ep.out_rect.position = (pos_out - bezier_handle_icon->get_size() / 2.0).floor();
ep.out_rect.size = bezier_handle_icon->get_size();
draw_texture(bezier_handle_icon, ep.out_rect.position);
<|fim_suffix|>;
}
}
}
if (!locked_tracks.has(i)) {
edit_points.push_back(ep);
}
}
}
for (int i = 0; i < edit_points.size(); ++i) {
if (edit_points[i].track == selected_track) {
EditPoint ep = edit_points[i];
edit_points.remove_at(i);
edit_points.insert(0, ep);
}
}
}
selection_rect = Rect2();
selection_handles_rect = Rect2();
// Draw scale handles.
if (draw_selection_handles) {
selection_rect.position = selected_pos[0];
selected_pos.remove_at(0);
for (const Point2 &pos : selected_pos) {
selection_rect = selection_rect.expand(pos);
}
const int outer_ofs = Math::round(12 * EDSCALE);
const int inner_ofs = Math::round(outer_ofs / 2.0);
// Draw horizontal handles.
if (selection_rect.size.height > CMP_EPSILON) {
_draw_line_clipped(selection_rect.position - Vector2(inner_ofs, inner_ofs), selection_rect.position + Vector2(selection_rect.size.width + inner_ofs, -inner_ofs), accent, limit, right_limit);
_draw_line_clipped(selection_rect.position + Vector2(-inner_ofs, selection_rect.size.height + inner_ofs), selection_rect.position + selection_rect.size + Vector2(inner_ofs, inner_ofs), accent, limit, right_limit);
}
// Draw vertical handles.
if (selection_rect.size.width > CMP_EPSILON) {
_draw_line_clipped(selection_rect.position - Vector2(inner_ofs, inner_ofs), selection_rect.position + Vector2(-inner_ofs, selection_rect.size.height + inner_ofs), accent, limit, right_limit);
_draw_line_clipped(selection_rect.position + Vector2(selection_rect.size.width + inner_ofs, -inner_ofs), selection_rect.position + selection_rect.size + Vector2(inner_ofs, inner_ofs), accent, limit, right_limit);
}
selection_handles_rect.position = selection_rect.position - Vector2(outer_ofs, outer_ofs);
selection_handles_rect.size = selection_rect.size + Vector2(outer_ofs, outer_ofs) * 2;
}
if (box_selecting) {
Vector2 bs_fro<|fim_middle|>ep.out_rect = ep.out_rect.grow(ep.out_rect.size.width * 0.5)
|
t);
_draw_line_clipped(pos, pos_out, accent, limit, right_limit);
}
EditPoint ep;
ep.track = i;
ep.key = j;
if (pos.x >= limit && pos.x <= right_limit) {
ep.point_rect.position = (pos - bezier_icon->get_size() / 2.0).floor();
ep.point_rect.size = bezier_icon->get_size();
if (is_selected) {
draw_texture(selected_icon, ep.point_rect.position);
draw_string(font, ep.point_rect.position + Vector2(8, -font->get_height(font_size) - 8), TTR("Time:") + " " + TS->format_number(rtos(Math::snapped(offset, 0.0001))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, accent);
draw_string(font, ep.point_rect.position + Vector2(8, -8), TTR("Value:") + " " + TS->format_number(rtos(Math::snapped(value, 0.001))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, accent);
} else {
Color track_color = Color(1, 1, 1, 1);
if (i != selected_track) {
track_color = subtrack_colors[i];
}
draw_texture(bezier_icon, ep.point_rect.position, track_color);
}
ep.point_rect = ep.point_rect.grow(ep.point_rect.size.width * 0.5);
}
ep.point_rect = ep.point_rect.grow(ep.point_rect.size.width * 0.5);
if (i == selected_track || is_selected) {
if (animation->bezier_track_get_key_handle_mode(i, j) != Animation::HANDLE_MODE_LINEAR) {
if (pos_in.x >= limit && pos_in.x <= right_limit) {
ep.in_rect.position = (pos_in - bezier_handle_icon->get_size() / 2.0).floor();
ep.in_rect.size = bezier_handle_icon->get_size();
draw_texture(bezier_handle_icon, ep.in_rect.position);
ep.in_rect = ep.in_rect.grow(ep.in_rect.size.width * 0.5);
}
if (pos_out.x >= limit && pos_out.x <= right_limit) {
ep.out_rect.position = (pos_out - bezier_handle_icon->get_size() / 2.0).floor();
ep.out_rect.size = bezier_handle_icon->get_size();
draw_texture(bezier_handle_icon, ep.out_rect.position);
|
ep.out_rect = ep.out_rect.grow(ep.out_rect.size.width * 0.5)
|
;
}
}
}
if (!locked_tracks.has(i)) {
edit_points.push_back(ep);
}
}
}
for (int i = 0; i < edit_points.size(); ++i) {
if (edit_points[i].track == selected_track) {
EditPoint ep = edit_points[i];
edit_points.remove_at(i);
edit_points.insert(0, ep);
}
}
}
selection_rect = Rect2();
selection_handles_rect = Rect2();
// Draw scale handles.
if (draw_selection_handles) {
selection_rect.position = selected_pos[0];
selected_pos.remove_at(0);
for (const Point2 &pos : selected_pos) {
selection_rect = selection_rect.expand(pos);
}
const int outer_ofs = Math::round(12 * EDSCALE);
const int inner_ofs = Math::round(outer_ofs / 2.0);
// Draw horizontal handles.
if (selection_rect.size.height > CMP_EPSILON) {
_draw_line_clipped(selection_rect.position - Vector2(inner_ofs, inner_ofs), selection_rect.position + Vector2(selection_rect.size.width + inner_ofs, -inner_ofs), accent, limit, right_limit);
_draw_line_clipped(selection_rect.position + Vector2(-inner_ofs, selection_rect.size.height + inner_ofs), selection_rect.position + selection_rect.size + Vector2(inner_ofs, inner_ofs), accent, limit, right_limit);
}
// Draw vertical handles.
if (selection_rect.size.width > CMP_EPSILON) {
_draw_line_clipped(selection_rect.position - Vector2(inner_ofs, inner_ofs), selection_rect.position + Vector2(-inner_ofs, selection_rect.size.height + inner_ofs), accent, limit, right_limit);
_draw_line_clipped(selection_rect.position + Vector2(selection_rect.size.width + inner_ofs, -inner_ofs), selection_rect.position + selection_rect.size + Vector2(inner_ofs, inner_ofs), accent, limit, right_limit);
}
selection_handles_rect.position = selection_rect.position - Vector2(outer_ofs, outer_ofs);
selection_handles_rect.size = selection_rect.size + Vector2(outer_ofs, outer_ofs) * 2;
}
if (box_selecting) {
Vector2 bs_fro
|
ast_based
|
<|fim_prefix|> queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
double to_process = MIN(frame_progress / abs_speed, remaining);
frame_progress -= to_process * abs_speed;
remaining -= to_process;
}
i++;
if (i > fc) {
					return; // Prevents freezing if to_process is repeatedly much smaller than remaining.
}
}
} break;
case NOTIFICATION_DRAW: {
if (frames.is_null() || !frames->has_animation(animation)) {
return;
}
Ref<Texture2D> texture = frames->get_frame_texture(animation, frame);
if (texture.is_null()) {
return;
}
RID ci = get_canvas_item();
Size2 s = texture->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (get_viewport() && get_viewport()->is_snap_2d_transforms_to_pixel_enabled()) {
ofs = (ofs + Point2(0.5, 0.5)).floor();
}
Rect2 dst_rect(ofs, s);
if (hflip) {
dst_rect.size.x = -dst_rect.size.x;
}
if (vflip) {
dst_rect.size.y = -dst_rect.size.y;
}
texture->draw_rect_region(ci, dst_rect, Rect2(Vector2(), texture->get_size()), Color(1, 1, 1), false);
} break;
}
}
void AnimatedSprite2D::set_sprite_frames(const Ref<SpriteFrames> &p_frames) {
if (frames == p_frames) {
return;
}
if (frames.is_valid()) {
frames->disconnect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
}
stop();
frames = p_frames;
if (frames.is_valid()) {
frames->connect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
List<StringName> al;
frames->get_animation_list(&al);
if (al.is_empty()) {
set_animation(StringName());
autoplay = String();
} else {
if (!frames->has_animation(animation)) {
set_animation(al.front()->get());
}
if (!frames->has_animation(autoplay)) {
autoplay = String();
}
}
}
notify_property_list_changed();
queue_redraw();
update_configuration_warnings();
emit_signal("sprite_frames_changed");
}<|fim_suffix|>}
void AnimatedSprite2D::set_frame(int p_frame) {
set_frame_and_progress(p_frame, std::signbit(get_playing_speed()) ? 1.0 : 0.0);
}
int AnimatedSprite2D::get_frame() const {
return frame;
}
void AnimatedSprite2D::set_frame_progress(real_t p_progress) {
frame_progress = p_progress;
}
real_t AnimatedSprite2D::get_frame_progress() const {
return frame_progress;
}
void AnimatedSprite2D::set_frame_and_progress(int p_frame, real_t p_progress) {
if (frames.is_null()) {
return;
}
bool has_animation = frames->has_animation(animation);
int end_frame = has_animation ? MAX(0, frames->get_frame_count(animation) - 1) : 0;
bool is_changed = frame != p_frame;
if (p_frame < 0) {
frame = 0;
} else if (has_animation && p_frame > end_frame) {
frame = end_frame;
} else {
frame = p_frame;
}
_calc_frame_speed_scale();
frame_progress = p_progress;
if (!is_changed) {
return; // No change, don't redraw.
}
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
void AnimatedSprite2D::set_speed_scale(float p_speed_scale) {
speed_scale = p_speed_scale;
}
float AnimatedSprite2D::get_speed_scale() const {
return speed_scale;
}
float AnimatedSprite2D::get_playing_speed() const {
if (!playing) {
return 0;
}
return speed_scale * custom_speed_scale;
}
void AnimatedSprite2D::set_centered(bool p_center) {
if (centered == p_center) {
return;
}
centered = p_center;
queue_redraw();
item_rect_changed();
}
bool AnimatedSprite2D::is_centered() const {
return centered;
}
void AnimatedSprite2D::set_offset(const Point2 &p_offset) {
if (offset == p_offset) {
return;
}
offset = p_offset;
queue_redraw();
item_rect_changed();
}
Point2 AnimatedSprite2D::get_offset() const {
return offset;
}
void AnimatedSprite2D::set_flip_h(bool p_flip) {
if (hflip == p_flip) {
return;
}
hflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_h() const {
return hflip;
}
void AnimatedSprite2D::set_flip_v(bool p_flip) {<|fim_middle|>
Ref<SpriteFrames> AnimatedSprite2D::get_sprite_frames() const {
return frames;
|
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
double to_process = MIN(frame_progress / abs_speed, remaining);
frame_progress -= to_process * abs_speed;
remaining -= to_process;
}
i++;
if (i > fc) {
					return; // Prevents freezing if to_process is repeatedly much smaller than remaining.
}
}
} break;
case NOTIFICATION_DRAW: {
if (frames.is_null() || !frames->has_animation(animation)) {
return;
}
Ref<Texture2D> texture = frames->get_frame_texture(animation, frame);
if (texture.is_null()) {
return;
}
RID ci = get_canvas_item();
Size2 s = texture->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (get_viewport() && get_viewport()->is_snap_2d_transforms_to_pixel_enabled()) {
ofs = (ofs + Point2(0.5, 0.5)).floor();
}
Rect2 dst_rect(ofs, s);
if (hflip) {
dst_rect.size.x = -dst_rect.size.x;
}
if (vflip) {
dst_rect.size.y = -dst_rect.size.y;
}
texture->draw_rect_region(ci, dst_rect, Rect2(Vector2(), texture->get_size()), Color(1, 1, 1), false);
} break;
}
}
void AnimatedSprite2D::set_sprite_frames(const Ref<SpriteFrames> &p_frames) {
if (frames == p_frames) {
return;
}
if (frames.is_valid()) {
frames->disconnect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
}
stop();
frames = p_frames;
if (frames.is_valid()) {
frames->connect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
List<StringName> al;
frames->get_animation_list(&al);
if (al.is_empty()) {
set_animation(StringName());
autoplay = String();
} else {
if (!frames->has_animation(animation)) {
set_animation(al.front()->get());
}
if (!frames->has_animation(autoplay)) {
autoplay = String();
}
}
}
notify_property_list_changed();
queue_redraw();
update_configuration_warnings();
emit_signal("sprite_frames_changed");
}
|
Ref<SpriteFrames> AnimatedSprite2D::get_sprite_frames() const {
return frames;
|
}
void AnimatedSprite2D::set_frame(int p_frame) {
set_frame_and_progress(p_frame, std::signbit(get_playing_speed()) ? 1.0 : 0.0);
}
int AnimatedSprite2D::get_frame() const {
return frame;
}
void AnimatedSprite2D::set_frame_progress(real_t p_progress) {
frame_progress = p_progress;
}
real_t AnimatedSprite2D::get_frame_progress() const {
return frame_progress;
}
void AnimatedSprite2D::set_frame_and_progress(int p_frame, real_t p_progress) {
if (frames.is_null()) {
return;
}
bool has_animation = frames->has_animation(animation);
int end_frame = has_animation ? MAX(0, frames->get_frame_count(animation) - 1) : 0;
bool is_changed = frame != p_frame;
if (p_frame < 0) {
frame = 0;
} else if (has_animation && p_frame > end_frame) {
frame = end_frame;
} else {
frame = p_frame;
}
_calc_frame_speed_scale();
frame_progress = p_progress;
if (!is_changed) {
return; // No change, don't redraw.
}
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
void AnimatedSprite2D::set_speed_scale(float p_speed_scale) {
speed_scale = p_speed_scale;
}
float AnimatedSprite2D::get_speed_scale() const {
return speed_scale;
}
float AnimatedSprite2D::get_playing_speed() const {
if (!playing) {
return 0;
}
return speed_scale * custom_speed_scale;
}
void AnimatedSprite2D::set_centered(bool p_center) {
if (centered == p_center) {
return;
}
centered = p_center;
queue_redraw();
item_rect_changed();
}
bool AnimatedSprite2D::is_centered() const {
return centered;
}
void AnimatedSprite2D::set_offset(const Point2 &p_offset) {
if (offset == p_offset) {
return;
}
offset = p_offset;
queue_redraw();
item_rect_changed();
}
Point2 AnimatedSprite2D::get_offset() const {
return offset;
}
void AnimatedSprite2D::set_flip_h(bool p_flip) {
if (hflip == p_flip) {
return;
}
hflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_h() const {
return hflip;
}
void AnimatedSprite2D::set_flip_v(bool p_flip) {
|
random
|
<|fim_prefix|>) {
#ifdef WIN32
if (_setmode(_fileno(stdin), _O_BINARY) == -1)
tprintf("ERROR: cin to binary: %s", strerror(errno));
#endif // WIN32
}
if (stream_filelist) {
return ProcessPagesFileList(stdin, nullptr, retry_config, timeout_millisec, renderer,
tesseract_->tessedit_page_number);
}
  // At this point we are officially in autodetection territory.
// That means any data in stdin must be buffered, to make it
// seekable.
std::string buf;
const l_uint8 *data = nullptr;
if (stdInput) {
buf.assign((std::istreambuf_iterator<char>(std::cin)), (std::istreambuf_iterator<char>()));
data = reinterpret_cast<const l_uint8 *>(buf.data());
} else if (strstr(filename, "://") != nullptr) {
// Get image or image list by URL.
#ifdef HAVE_LIBCURL
CURL *curl = curl_easy_init();
if (curl == nullptr) {
fprintf(stderr, "Error, curl_easy_init failed\n");
return false;
} else {
CURLcode curlcode;
auto error = [curl, &curlcode](const char *function) {
fprintf(stderr, "Error, %s failed with error %s\n", function, curl_easy_strerror(curlcode));
curl_easy_cleanup(curl);
return false;
};
curlcode = curl_easy_setopt(curl, CURLOPT_URL, filename);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
// Follow HTTP, HTTPS, FTP and FTPS redirects.
curlcode = curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
// Allow no more than 8 redirections to prevent endless loops.
curlcode = curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 8);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
int timeout = curl_timeout;
if (timeout > 0) {
<|fim_suffix|>;
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
}
std::string cookiefile = curl_cookiefile;
if (!cookiefile.empty()) {
curlcode = curl_easy_setopt(curl, CURLOPT_COOKIEFILE, cookiefile.c_str());
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
}
curlcode = curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buf);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_USERAGENT, "Tesseract OCR");
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_perform(curl);
if (curlcode != CURLE_OK) {
return error("curl_easy_perform");
}
curl_easy_cleanup(curl);
data = reinterpret_cast<const l_uint8 *>(buf.data());
}
#else
fprintf(stderr, "Error, this tesseract has no URL support\n");
return false;
#endif
} else {
// Check whether the input file can be read.
if (FILE *file = fopen(filename, "rb")) {
fclose(file);
} else {
fprintf(stderr, "Error, cannot read input file %s: %s\n", filename, strerror(errno));
return false;
}
}
// Here is our autodetection
int format;
int r =
(data != nullptr) ? findFileFormatBuffer(data, &format) : findFileFormat(filename, &format);
// Maybe we have a filelist
if (r != 0 || format == IFF_UNKNOWN) {
std::string s;
if (data != nullptr) {
s = buf.c_str();
} else {
std::ifstream t(filename);
std::string u((std::istreambuf_iterator<char>(t)), std::istreambuf_i<|fim_middle|>curlcode = curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L)
|
) {
#ifdef WIN32
if (_setmode(_fileno(stdin), _O_BINARY) == -1)
tprintf("ERROR: cin to binary: %s", strerror(errno));
#endif // WIN32
}
if (stream_filelist) {
return ProcessPagesFileList(stdin, nullptr, retry_config, timeout_millisec, renderer,
tesseract_->tessedit_page_number);
}
  // At this point we are officially in autodetection territory.
// That means any data in stdin must be buffered, to make it
// seekable.
std::string buf;
const l_uint8 *data = nullptr;
if (stdInput) {
buf.assign((std::istreambuf_iterator<char>(std::cin)), (std::istreambuf_iterator<char>()));
data = reinterpret_cast<const l_uint8 *>(buf.data());
} else if (strstr(filename, "://") != nullptr) {
// Get image or image list by URL.
#ifdef HAVE_LIBCURL
CURL *curl = curl_easy_init();
if (curl == nullptr) {
fprintf(stderr, "Error, curl_easy_init failed\n");
return false;
} else {
CURLcode curlcode;
auto error = [curl, &curlcode](const char *function) {
fprintf(stderr, "Error, %s failed with error %s\n", function, curl_easy_strerror(curlcode));
curl_easy_cleanup(curl);
return false;
};
curlcode = curl_easy_setopt(curl, CURLOPT_URL, filename);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
// Follow HTTP, HTTPS, FTP and FTPS redirects.
curlcode = curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
// Allow no more than 8 redirections to prevent endless loops.
curlcode = curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 8);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
int timeout = curl_timeout;
if (timeout > 0) {
|
curlcode = curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L)
|
;
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
}
std::string cookiefile = curl_cookiefile;
if (!cookiefile.empty()) {
curlcode = curl_easy_setopt(curl, CURLOPT_COOKIEFILE, cookiefile.c_str());
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
}
curlcode = curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buf);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_USERAGENT, "Tesseract OCR");
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_perform(curl);
if (curlcode != CURLE_OK) {
return error("curl_easy_perform");
}
curl_easy_cleanup(curl);
data = reinterpret_cast<const l_uint8 *>(buf.data());
}
#else
fprintf(stderr, "Error, this tesseract has no URL support\n");
return false;
#endif
} else {
// Check whether the input file can be read.
if (FILE *file = fopen(filename, "rb")) {
fclose(file);
} else {
fprintf(stderr, "Error, cannot read input file %s: %s\n", filename, strerror(errno));
return false;
}
}
// Here is our autodetection
int format;
int r =
(data != nullptr) ? findFileFormatBuffer(data, &format) : findFileFormat(filename, &format);
// Maybe we have a filelist
if (r != 0 || format == IFF_UNKNOWN) {
std::string s;
if (data != nullptr) {
s = buf.c_str();
} else {
std::ifstream t(filename);
std::string u((std::istreambuf_iterator<char>(t)), std::istreambuf_i
|
ast_based
|
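The download path above registers WriteMemoryCallback with CURLOPT_WRITEFUNCTION and passes the std::string buffer through CURLOPT_WRITEDATA, but the callback itself is not shown in this excerpt. A typical callback of that shape, sketched here for illustration (the one in the actual source may differ), appends each delivered block to the string and reports how many bytes it consumed:

#include <cstddef>
#include <string>

// libcurl write callback: `userdata` is whatever was set via CURLOPT_WRITEDATA.
static size_t WriteMemoryCallback(char *contents, size_t size, size_t nmemb, void *userdata) {
  size_t total = size * nmemb;
  static_cast<std::string *>(userdata)->append(contents, total);
  return total; // Returning a different count tells curl the transfer failed.
}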
<|fim_prefix|> } break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_classname(const RID &p_id, const String &p_classname) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_classname.is_empty()) {
accesskit_node_set_class_name(ae->node, p_classname.utf8().ptr());
} else {
accesskit_node_clear_class_name(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_placeholder(const RID &p_id, const String &p_placeholder) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_placeholder.is_empty()) {
accesskit_node_set_placeholder(ae->node, p_placeholder.utf8().ptr());
} else {
accesskit_node_clear_placeholder(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_language(const RID &p_id, const String &p_language) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_language(ae->node, p_language.utf8().ptr());
}
void AccessibilityDriverAccessKit::accessibility_update_set_text_orientation(const RID &p_id, bool p_vertical) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (p_vertical) {
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_TOP_TO_BOTTOM);
} else {
<|fim_suffix|>
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_orientation(const RID &p_id, bool p_vertical) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (p_vertical) {
accesskit_node_set_orientation(ae->node, ACCESSKIT_ORIENTATION_VERTICAL);
} else {
accesskit_node_set_orientation(ae->node, ACCESSKIT_ORIENTATION_HORIZONTAL);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_shortcut(const RID &p_id, const String &p_shortcut) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_shortcut.is_empty()) {
accesskit_node_set_keyboard_shortcut(ae->node, p_shortcut.utf8().ptr());
} else {
accesskit_node_clear_keyboard_shortcut(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_url(const RID &p_id, const String &p_url) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_url.is_empty()) {
accesskit_node_set_url(ae->node, p_url.utf8().ptr());
} else {
accesskit_node_clear_url(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_role_description(const RID &p_id, const String &p_description) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_description.is_empty()) {
<|fim_middle|>accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT);
|
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_classname(const RID &p_id, const String &p_classname) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_classname.is_empty()) {
accesskit_node_set_class_name(ae->node, p_classname.utf8().ptr());
} else {
accesskit_node_clear_class_name(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_placeholder(const RID &p_id, const String &p_placeholder) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_placeholder.is_empty()) {
accesskit_node_set_placeholder(ae->node, p_placeholder.utf8().ptr());
} else {
accesskit_node_clear_placeholder(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_language(const RID &p_id, const String &p_language) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_language(ae->node, p_language.utf8().ptr());
}
void AccessibilityDriverAccessKit::accessibility_update_set_text_orientation(const RID &p_id, bool p_vertical) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (p_vertical) {
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_TOP_TO_BOTTOM);
} else {
|
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT);
|
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_orientation(const RID &p_id, bool p_vertical) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (p_vertical) {
accesskit_node_set_orientation(ae->node, ACCESSKIT_ORIENTATION_VERTICAL);
} else {
accesskit_node_set_orientation(ae->node, ACCESSKIT_ORIENTATION_HORIZONTAL);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_shortcut(const RID &p_id, const String &p_shortcut) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_shortcut.is_empty()) {
accesskit_node_set_keyboard_shortcut(ae->node, p_shortcut.utf8().ptr());
} else {
accesskit_node_clear_keyboard_shortcut(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_url(const RID &p_id, const String &p_url) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_url.is_empty()) {
accesskit_node_set_url(ae->node, p_url.utf8().ptr());
} else {
accesskit_node_clear_url(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_role_description(const RID &p_id, const String &p_description) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_description.is_empty()) {
|
ast_based
|
<|fim_prefix|> " [-a=<aspectRatio>] # fix aspect ratio (fx/fy)\n"
" [-p] # fix the principal point at the center\n"
" [input_data] # input data - text file with a list of the images of the board\n"
"\n", argv[0] );
}
static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
{
corners.resize(0);
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float(j*squareSize),
float(i*squareSize), 0));
}
static bool run3Calibration(vector<vector<Point2f> > imagePoints1,
vector<vector<Point2f> > imagePoints2,
vector<vector<Point2f> > imagePoints3,
Size imageSize, Size boardSize,
float squareSize, float aspectRatio,
int flags,
Mat& cameraMatrix1, Mat& distCoeffs1,
Mat& cameraMatrix2, Mat& distCoeffs2,
Mat& cameraMatrix3, Mat& distCoeffs3,
Mat& R12, Mat& T12, Mat& R13, Mat& T13)
{
int c, i;
// step 1: calibrate each camera individually
vector<vector<Point3f> > objpt(1);
vector<vector<Point2f> > imgpt;
calcChessboardCorners(boardSize, squareSize, objpt[0]);
vector<Mat> rvecs, tvecs;
for( c = 1; c <= 3; c++ )
{
const vector<vector<Point2f> >& imgpt0 = c == 1 ? imagePoints1 : c == 2 ? imagePoints2 : imagePoints3;
imgpt.clear();
int N = 0;
for( i = 0; i < (int)imgpt0.size(); i++ )
if( !imgpt0[i].empty() )
{
imgpt.push_back(imgpt0[i]);
N += (int)imgpt0[i].size();
}
if( imgpt.size() < 3 )<|fim_suffix|>
objpt.resize(imgpt.size(),objpt[0]);
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
if( flags & CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = aspectRatio;
Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
double err = calibrateCamera(objpt, imgpt, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs,
flags|CALIB_FIX_K3/*|CALIB_FIX_K4|CALIB_FIX_K5|CALIB_FIX_K6*/);
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
if(!ok)
{
printf("Error: camera %d was not calibrated\n", c);
return false;
}
printf("Camera %d calibration reprojection error = %g\n", c, sqrt(err/N));
if( c == 1 )
cameraMatrix1 = cameraMatrix, distCoeffs1 = distCoeffs;
else if( c == 2 )
cameraMatrix2 = cameraMatrix, distCoeffs2 = distCoeffs;
else
cameraMatrix3 = cameraMatrix, distCoeffs3 = distCoeffs;
}
vector<vector<Point2f> > imgpt_right;
// step 2: calibrate (1,2) and (3,2) pairs
for( c = 2; c <= 3; c++ )
{
const vector<vector<Point2f> >& imgpt0 = c == 2 ? imagePoints2 : imagePoints3;
imgpt.clear();
imgpt_right.clear();
int N = 0;
for( i = 0; i < (int)std::min(imagePoints1.size(), imgpt0.size()); i++ )
if( !imagePoints1.empty() && !imgpt0[i].empty() )
{
imgpt.push_back(imagePoints1[i]);
imgpt_right.push_back(imgpt0[i]);
N += (int)imgpt0[i].size();
}
if( imgpt.size() < 3 )
{
printf("Error: not enough shared views for cameras 1 and %d\n", c);
return false;
}
objpt.resize(imgpt.size(),objpt[0]);
Mat cameraMatrix = c == 2 ? cameraMatrix2 : cameraMatrix3;
Mat distCoeffs = c == 2 ? distCoeffs2 : distCoeffs3;
Mat R, T, E, F;<|fim_middle|> {
printf("Error: not enough views for camera %d\n", c);
return false;
}
|
" [-a=<aspectRatio>] # fix aspect ratio (fx/fy)\n"
" [-p] # fix the principal point at the center\n"
" [input_data] # input data - text file with a list of the images of the board\n"
"\n", argv[0] );
}
static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
{
corners.resize(0);
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float(j*squareSize),
float(i*squareSize), 0));
}
static bool run3Calibration(vector<vector<Point2f> > imagePoints1,
vector<vector<Point2f> > imagePoints2,
vector<vector<Point2f> > imagePoints3,
Size imageSize, Size boardSize,
float squareSize, float aspectRatio,
int flags,
Mat& cameraMatrix1, Mat& distCoeffs1,
Mat& cameraMatrix2, Mat& distCoeffs2,
Mat& cameraMatrix3, Mat& distCoeffs3,
Mat& R12, Mat& T12, Mat& R13, Mat& T13)
{
int c, i;
// step 1: calibrate each camera individually
vector<vector<Point3f> > objpt(1);
vector<vector<Point2f> > imgpt;
calcChessboardCorners(boardSize, squareSize, objpt[0]);
vector<Mat> rvecs, tvecs;
for( c = 1; c <= 3; c++ )
{
const vector<vector<Point2f> >& imgpt0 = c == 1 ? imagePoints1 : c == 2 ? imagePoints2 : imagePoints3;
imgpt.clear();
int N = 0;
for( i = 0; i < (int)imgpt0.size(); i++ )
if( !imgpt0[i].empty() )
{
imgpt.push_back(imgpt0[i]);
N += (int)imgpt0[i].size();
}
if( imgpt.size() < 3 )
|
{
printf("Error: not enough views for camera %d\n", c);
return false;
}
|
objpt.resize(imgpt.size(),objpt[0]);
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
if( flags & CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = aspectRatio;
Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
double err = calibrateCamera(objpt, imgpt, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs,
flags|CALIB_FIX_K3/*|CALIB_FIX_K4|CALIB_FIX_K5|CALIB_FIX_K6*/);
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
if(!ok)
{
printf("Error: camera %d was not calibrated\n", c);
return false;
}
printf("Camera %d calibration reprojection error = %g\n", c, sqrt(err/N));
if( c == 1 )
cameraMatrix1 = cameraMatrix, distCoeffs1 = distCoeffs;
else if( c == 2 )
cameraMatrix2 = cameraMatrix, distCoeffs2 = distCoeffs;
else
cameraMatrix3 = cameraMatrix, distCoeffs3 = distCoeffs;
}
vector<vector<Point2f> > imgpt_right;
// step 2: calibrate (1,2) and (3,2) pairs
for( c = 2; c <= 3; c++ )
{
const vector<vector<Point2f> >& imgpt0 = c == 2 ? imagePoints2 : imagePoints3;
imgpt.clear();
imgpt_right.clear();
int N = 0;
for( i = 0; i < (int)std::min(imagePoints1.size(), imgpt0.size()); i++ )
            if( !imagePoints1[i].empty() && !imgpt0[i].empty() )
{
imgpt.push_back(imagePoints1[i]);
imgpt_right.push_back(imgpt0[i]);
N += (int)imgpt0[i].size();
}
if( imgpt.size() < 3 )
{
printf("Error: not enough shared views for cameras 1 and %d\n", c);
return false;
}
objpt.resize(imgpt.size(),objpt[0]);
Mat cameraMatrix = c == 2 ? cameraMatrix2 : cameraMatrix3;
Mat distCoeffs = c == 2 ? distCoeffs2 : distCoeffs3;
Mat R, T, E, F;
|
random
|
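A small worked example may help pin down the coordinate convention used by calcChessboardCorners() in the row above; it is illustrative only and assumes it runs in the same translation unit as the sample (so the OpenCV types and the static helper are in scope):
// Hypothetical 3x2 inner-corner board with squareSize = 1:
vector<Point3f> corners;
calcChessboardCorners(Size(3, 2), 1.f, corners);
// corners now holds, row by row (x along the board width, y along the height, z = 0):
//   (0,0,0) (1,0,0) (2,0,0)
//   (0,1,0) (1,1,0) (2,1,0)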
<|fim_prefix|>
paste_keys(-1.0, false);
}
accept_event();
}
if (ED_IS_SHORTCUT("animation_editor/delete_selection", p_event)) {
if (!read_only) {
delete_selection();
}
accept_event();
}
}
Ref<InputEventKey> key_press = p_event;
if (key_press.is_valid() && key_press->is_pressed()) {
if (ED_IS_SHORTCUT("animation_bezier_editor/focus", p_event)) {
SelectionSet focused_keys;
if (selection.is_empty()) {
for (int i = 0; i < edit_points.size(); ++i) {
IntPair key_pair = IntPair(edit_points[i].track, edit_points[i].key);
focused_keys.insert(key_pair);
}
} else {
for (const IntPair &E : selection) {
focused_keys.insert(E);
if (E.second > 0) {
IntPair previous_key = IntPair(E.first, E.second - 1);
focused_keys.insert(previous_key);
}
if (E.second < animation->track_get_key_count(E.first) - 1) {
IntPair next_key = IntPair(E.first, E.second + 1);
focused_keys.insert(next_key);
}
}
}
if (focused_keys.is_empty()) {
accept_event();
return;
}
real_t minimum_time = Math::INF;
real_t maximum_time = -Math::INF;
real_t minimum_value = Math::INF;
real_t maximum_value = -Math::INF;
for (const IntPair &E : focused_keys) {
IntPair key_pair = E;
real_t time = animation->track_get_key_time(key_pair.first, key_pair.second);
real_t value = animation->bezier_track_get_key_value(key_pair.first, key_pair.second);
minimum_time = MIN(time, minimum_time);
maximum_time = MAX(time, maximum_time);
minimum_value = MIN(value, minimum_value);
maximum_value = MAX(value, maximum_value);
}
float width = get_size().width - timeline->get_name_limit() - timeline->get_buttons_width();
float padding = width * 0.1;
float desired_scale = (width - padding / 2.0) / (maximum_time - minimum_time);
minimum_time = MAX(0, minimum_time - (padding / 2.0) / desired_scale);
float zv = Math::pow(100 / desired_scale, 0.125f);
if (zv < 1) <|fim_suffix|>
float zoom_value = timeline->get_zoom()->get_max() - zv;
if (Math::is_finite(minimum_time) && Math::is_finite(maximum_time) && maximum_time - minimum_time > CMP_EPSILON) {
timeline->get_zoom()->set_value(zoom_value);
callable_mp((Range *)timeline, &Range::set_value).call_deferred(minimum_time);
}
if (Math::is_finite(minimum_value) && Math::is_finite(maximum_value)) {
_zoom_vertically(minimum_value, maximum_value);
}
queue_redraw();
accept_event();
return;
} else if (ED_IS_SHORTCUT("animation_bezier_editor/select_all_keys", p_event)) {
for (int i = 0; i < edit_points.size(); ++i) {
_select_at_anim(animation, edit_points[i].track, animation->track_get_key_time(edit_points[i].track, edit_points[i].key), i == 0);
}
queue_redraw();
accept_event();
return;
} else if (ED_IS_SHORTCUT("animation_bezier_editor/deselect_all_keys", p_event)) {
selection.clear();
emit_signal(SNAME("clear_selection"));
queue_redraw();
accept_event();
return;
}
}
Ref<InputEventMouseButton> mb = p_event;
int limit = timeline->get_name_limit();
if (mb.is_valid() && mb->get_button_index() == MouseButton::RIGHT && mb->is_pressed()) {
menu_insert_key = mb->get_position();
if (menu_insert_key.x >= limit && menu_insert_key.x <= get_size().width) {
if (!read_only) {
Vector2 popup_pos = get_screen_position() + mb->get_position();
bool selected = _try_select_at_ui_pos(mb->get_position(), mb->is_shift_pressed(), false);
menu->clear();
menu->add_icon_item(bezier_icon, TTR("Insert Key Here"), MENU_KEY_INSERT);
if (selected || selection.size()) {
menu->add_separator();
menu->add_icon_item(get_editor_theme_icon(SNAME("Duplicate")), TTR("Duplicate Selected Key(s)"), MENU_KEY_DUPLICATE);
menu->add_icon_item(get_editor_theme_icon(SNAME("ActionCut")), TTR("Cut Selected Key(s)"), MENU_KEY_CUT);
menu->add_icon_item(get_editor_theme_icon(SNAME("ActionCopy")), TTR("Copy Selected Key(s)"<|fim_middle|>{
zv = Math::pow(desired_scale / 100, 0.125f) - 1;
zv = 1 - zv;
}
|
paste_keys(-1.0, false);
}
accept_event();
}
if (ED_IS_SHORTCUT("animation_editor/delete_selection", p_event)) {
if (!read_only) {
delete_selection();
}
accept_event();
}
}
Ref<InputEventKey> key_press = p_event;
if (key_press.is_valid() && key_press->is_pressed()) {
if (ED_IS_SHORTCUT("animation_bezier_editor/focus", p_event)) {
SelectionSet focused_keys;
if (selection.is_empty()) {
for (int i = 0; i < edit_points.size(); ++i) {
IntPair key_pair = IntPair(edit_points[i].track, edit_points[i].key);
focused_keys.insert(key_pair);
}
} else {
for (const IntPair &E : selection) {
focused_keys.insert(E);
if (E.second > 0) {
IntPair previous_key = IntPair(E.first, E.second - 1);
focused_keys.insert(previous_key);
}
if (E.second < animation->track_get_key_count(E.first) - 1) {
IntPair next_key = IntPair(E.first, E.second + 1);
focused_keys.insert(next_key);
}
}
}
if (focused_keys.is_empty()) {
accept_event();
return;
}
real_t minimum_time = Math::INF;
real_t maximum_time = -Math::INF;
real_t minimum_value = Math::INF;
real_t maximum_value = -Math::INF;
for (const IntPair &E : focused_keys) {
IntPair key_pair = E;
real_t time = animation->track_get_key_time(key_pair.first, key_pair.second);
real_t value = animation->bezier_track_get_key_value(key_pair.first, key_pair.second);
minimum_time = MIN(time, minimum_time);
maximum_time = MAX(time, maximum_time);
minimum_value = MIN(value, minimum_value);
maximum_value = MAX(value, maximum_value);
}
float width = get_size().width - timeline->get_name_limit() - timeline->get_buttons_width();
float padding = width * 0.1;
float desired_scale = (width - padding / 2.0) / (maximum_time - minimum_time);
minimum_time = MAX(0, minimum_time - (padding / 2.0) / desired_scale);
float zv = Math::pow(100 / desired_scale, 0.125f);
if (zv < 1)
|
{
zv = Math::pow(desired_scale / 100, 0.125f) - 1;
zv = 1 - zv;
}
|
float zoom_value = timeline->get_zoom()->get_max() - zv;
if (Math::is_finite(minimum_time) && Math::is_finite(maximum_time) && maximum_time - minimum_time > CMP_EPSILON) {
timeline->get_zoom()->set_value(zoom_value);
callable_mp((Range *)timeline, &Range::set_value).call_deferred(minimum_time);
}
if (Math::is_finite(minimum_value) && Math::is_finite(maximum_value)) {
_zoom_vertically(minimum_value, maximum_value);
}
queue_redraw();
accept_event();
return;
} else if (ED_IS_SHORTCUT("animation_bezier_editor/select_all_keys", p_event)) {
for (int i = 0; i < edit_points.size(); ++i) {
_select_at_anim(animation, edit_points[i].track, animation->track_get_key_time(edit_points[i].track, edit_points[i].key), i == 0);
}
queue_redraw();
accept_event();
return;
} else if (ED_IS_SHORTCUT("animation_bezier_editor/deselect_all_keys", p_event)) {
selection.clear();
emit_signal(SNAME("clear_selection"));
queue_redraw();
accept_event();
return;
}
}
Ref<InputEventMouseButton> mb = p_event;
int limit = timeline->get_name_limit();
if (mb.is_valid() && mb->get_button_index() == MouseButton::RIGHT && mb->is_pressed()) {
menu_insert_key = mb->get_position();
if (menu_insert_key.x >= limit && menu_insert_key.x <= get_size().width) {
if (!read_only) {
Vector2 popup_pos = get_screen_position() + mb->get_position();
bool selected = _try_select_at_ui_pos(mb->get_position(), mb->is_shift_pressed(), false);
menu->clear();
menu->add_icon_item(bezier_icon, TTR("Insert Key Here"), MENU_KEY_INSERT);
if (selected || selection.size()) {
menu->add_separator();
menu->add_icon_item(get_editor_theme_icon(SNAME("Duplicate")), TTR("Duplicate Selected Key(s)"), MENU_KEY_DUPLICATE);
menu->add_icon_item(get_editor_theme_icon(SNAME("ActionCut")), TTR("Cut Selected Key(s)"), MENU_KEY_CUT);
menu->add_icon_item(get_editor_theme_icon(SNAME("ActionCopy")), TTR("Copy Selected Key(s)"
|
ast_based
|
<|fim_prefix|>t_pivot() const {
return Vector2();
}
bool AnimatedSprite2D::_edit_use_pivot() const {
return true;
}
#endif // TOOLS_ENABLED
#ifdef DEBUG_ENABLED
Rect2 AnimatedSprite2D::_edit_get_rect() const {
return _get_rect();
}
bool AnimatedSprite2D::_edit_use_rect() const {
if (frames.is_null() || !frames->has_animation(animation)) {
return false;
}
if (frame < 0 || frame >= frames->get_frame_count(animation)) {
return false;
}
Ref<Texture2D> t;
if (animation) {
t = frames->get_frame_texture(animation, frame);
}
return t.is_valid();
}
#endif // DEBUG_ENABLED
Rect2 AnimatedSprite2D::get_anchorable_rect() const {
return _get_rect();
}
Rect2 AnimatedSprite2D::_get_rect() const {
if (frames.is_null() || !frames->has_animation(animation)) {
return Rect2();
}
if (frame < 0 || frame >= frames->get_frame_count(animation)) {
return Rect2();
}
Ref<Texture2D> t;
if (animation) {
t = frames->get_frame_texture(animation, frame);
}
if (t.is_null()) {
return Rect2();
}
Size2 s = t->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (s == Size2(0, 0)) {
s = Size2(1, 1);
}
return Rect2(ofs, s);
}
void AnimatedSprite2D::_validate_property(PropertyInfo &p_property) const {
if (frames.is_null()) {
return;
}
if (!Engine::get_singleton()->is_editor_hint()) {
if (p_property.name == "frame" && playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
}
return;
}
if (p_property.name == "animation") {
List<StringName> names;
frames->get_animation_list(&names);
names.sort_custom<StringName::AlphCompare>();
bool current_found = false;
bool is_first_element = true;
for (const StringName &E : names) {
if (!is_first_element) {
p_property.hint_string += ",";
} else {
is_first_element = false;
}
p_property.hint_string += String(E);
if (animation == E) {
current_found = true;
}
}
if (!current_found) {
if (p_property.hint_string.is_empty()) {
<|fim_suffix|>
} else {
p_property.hint_string = String(animation) + "," + p_property.hint_string;
}
}
return;
}
if (p_property.name == "frame") {
if (playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
return;
}
p_property.hint = PROPERTY_HINT_RANGE;
if (frames->has_animation(animation) && frames->get_frame_count(animation) > 0) {
p_property.hint_string = "0," + itos(frames->get_frame_count(animation) - 1) + ",1";
} else {
// Avoid an error, `hint_string` is required for `PROPERTY_HINT_RANGE`.
p_property.hint_string = "0,0,1";
}
p_property.usage |= PROPERTY_USAGE_KEYING_INCREMENTS;
}
}
void AnimatedSprite2D::_notification(int p_what) {
switch (p_what) {
case NOTIFICATION_ACCESSIBILITY_UPDATE: {
RID ae = get_accessibility_element();
ERR_FAIL_COND(ae.is_null());
Rect2 dst_rect = _get_rect();
DisplayServer::get_singleton()->accessibility_update_set_role(ae, DisplayServer::AccessibilityRole::ROLE_IMAGE);
DisplayServer::get_singleton()->accessibility_update_set_transform(ae, get_transform());
DisplayServer::get_singleton()->accessibility_update_set_bounds(ae, dst_rect);
} break;
case NOTIFICATION_READY: {
if (!Engine::get_singleton()->is_editor_hint() && frames.is_valid() && frames->has_animation(autoplay)) {
play(autoplay);
}
} break;
case NOTIFICATION_INTERNAL_PROCESS: {
if (frames.is_null() || !frames->has_animation(animation)) {
return;
}
double remaining = get_process_delta_time();
int i = 0;
while (remaining) {
// Animation speed may be changed by animation_finished or frame_changed signals.
double speed = frames->get_animation_speed(animation) * speed_scale * custom_speed_scale * frame_speed_scale;
double abs_speed = Math::abs(speed);
if (speed == 0) {
return; // Do nothing.
}
// Frame count may be changed by animation_finished or frame_changed signals.
int fc = frames->get_frame_count(animation);
in<|fim_middle|>p_property.hint_string = String(animation);
|
t_pivot() const {
return Vector2();
}
bool AnimatedSprite2D::_edit_use_pivot() const {
return true;
}
#endif // TOOLS_ENABLED
#ifdef DEBUG_ENABLED
Rect2 AnimatedSprite2D::_edit_get_rect() const {
return _get_rect();
}
bool AnimatedSprite2D::_edit_use_rect() const {
if (frames.is_null() || !frames->has_animation(animation)) {
return false;
}
if (frame < 0 || frame >= frames->get_frame_count(animation)) {
return false;
}
Ref<Texture2D> t;
if (animation) {
t = frames->get_frame_texture(animation, frame);
}
return t.is_valid();
}
#endif // DEBUG_ENABLED
Rect2 AnimatedSprite2D::get_anchorable_rect() const {
return _get_rect();
}
Rect2 AnimatedSprite2D::_get_rect() const {
if (frames.is_null() || !frames->has_animation(animation)) {
return Rect2();
}
if (frame < 0 || frame >= frames->get_frame_count(animation)) {
return Rect2();
}
Ref<Texture2D> t;
if (animation) {
t = frames->get_frame_texture(animation, frame);
}
if (t.is_null()) {
return Rect2();
}
Size2 s = t->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (s == Size2(0, 0)) {
s = Size2(1, 1);
}
return Rect2(ofs, s);
}
void AnimatedSprite2D::_validate_property(PropertyInfo &p_property) const {
if (frames.is_null()) {
return;
}
if (!Engine::get_singleton()->is_editor_hint()) {
if (p_property.name == "frame" && playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
}
return;
}
if (p_property.name == "animation") {
List<StringName> names;
frames->get_animation_list(&names);
names.sort_custom<StringName::AlphCompare>();
bool current_found = false;
bool is_first_element = true;
for (const StringName &E : names) {
if (!is_first_element) {
p_property.hint_string += ",";
} else {
is_first_element = false;
}
p_property.hint_string += String(E);
if (animation == E) {
current_found = true;
}
}
if (!current_found) {
if (p_property.hint_string.is_empty()) {
|
p_property.hint_string = String(animation);
|
} else {
p_property.hint_string = String(animation) + "," + p_property.hint_string;
}
}
return;
}
if (p_property.name == "frame") {
if (playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
return;
}
p_property.hint = PROPERTY_HINT_RANGE;
if (frames->has_animation(animation) && frames->get_frame_count(animation) > 0) {
p_property.hint_string = "0," + itos(frames->get_frame_count(animation) - 1) + ",1";
} else {
// Avoid an error, `hint_string` is required for `PROPERTY_HINT_RANGE`.
p_property.hint_string = "0,0,1";
}
p_property.usage |= PROPERTY_USAGE_KEYING_INCREMENTS;
}
}
void AnimatedSprite2D::_notification(int p_what) {
switch (p_what) {
case NOTIFICATION_ACCESSIBILITY_UPDATE: {
RID ae = get_accessibility_element();
ERR_FAIL_COND(ae.is_null());
Rect2 dst_rect = _get_rect();
DisplayServer::get_singleton()->accessibility_update_set_role(ae, DisplayServer::AccessibilityRole::ROLE_IMAGE);
DisplayServer::get_singleton()->accessibility_update_set_transform(ae, get_transform());
DisplayServer::get_singleton()->accessibility_update_set_bounds(ae, dst_rect);
} break;
case NOTIFICATION_READY: {
if (!Engine::get_singleton()->is_editor_hint() && frames.is_valid() && frames->has_animation(autoplay)) {
play(autoplay);
}
} break;
case NOTIFICATION_INTERNAL_PROCESS: {
if (frames.is_null() || !frames->has_animation(animation)) {
return;
}
double remaining = get_process_delta_time();
int i = 0;
while (remaining) {
// Animation speed may be changed by animation_finished or frame_changed signals.
double speed = frames->get_animation_speed(animation) * speed_scale * custom_speed_scale * frame_speed_scale;
double abs_speed = Math::abs(speed);
if (speed == 0) {
return; // Do nothing.
}
// Frame count may be changed by animation_finished or frame_changed signals.
int fc = frames->get_frame_count(animation);
in
|
ast_based
|
<|fim_prefix|> "Usage: %s\n"
" -w=<board_width> # the number of inner corners per one of board dimension\n"
" -h=<board_height> # the number of inner corners per another board dimension\n"
" [-s=<squareSize>] # square size in some user-defined units (1 by default)\n"
" [-o=<out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
" [-zt] # assume zero tangential distortion\n"
" [-a=<aspectRatio>] # fix aspect ratio (fx/fy)\n"
" [-p] # fix the principal point at the center\n"
" [input_data] # input data - text file with a list of the images of the board\n"
"\n", argv[0] );
}
static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
{
corners.resize(0);
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float(j*squareSize),
float(i*squareSize), 0));
}
static bool run3Calibration(vector<vector<Point2f> > imagePoints1,
vector<vector<Point2f> > imagePoints2,
vector<vector<Point2f> > imagePoints3,
Size imageSize, Size boardSize,
float squareSize, float aspectRatio,
int flags,
Mat& cameraMatrix1, Mat& distCoeffs1,
Mat& cameraMatrix2, Mat& distCoeffs2,
Mat& cameraMatrix3, Mat& distCoeffs3,
Mat& R12, Mat& T12, Mat& R13, Mat& T13)
{
int c, i;
// step 1: calibrate each camera individually
vector<vector<Point3f> > objpt(1);<|fim_suffix|> if( !imgpt0[i].empty() )
{
imgpt.push_back(imgpt0[i]);
N += (int)imgpt0[i].size();
}
if( imgpt.size() < 3 )
{
printf("Error: not enough views for camera %d\n", c);
return false;
}
objpt.resize(imgpt.size(),objpt[0]);
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
if( flags & CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = aspectRatio;
Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
double err = calibrateCamera(objpt, imgpt, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs,
flags|CALIB_FIX_K3/*|CALIB_FIX_K4|CALIB_FIX_K5|CALIB_FIX_K6*/);
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
if(!ok)
{
printf("Error: camera %d was not calibrated\n", c);
return false;
}
printf("Camera %d calibration reprojection error = %g\n", c, sqrt(err/N));
if( c == 1 )
cameraMatrix1 = cameraMatrix, distCoeffs1 = distCoeffs;
else if( c == 2 )
cameraMatrix2 = cameraMatrix, distCoeffs2 = distCoeffs;
else
cameraMatrix3 = cameraMatrix, distCoeffs3 = distCoeffs;
}
vector<vector<Point2f> > imgpt_right;
// step 2: calibrate (1,2) and (3,2) pairs
for( c = 2; c <= 3; c++ )
{
const vector<vector<Point2f> >& imgpt0 = c == 2 ? imagePoints2 : imagePoints3;
imgpt.clear();
imgpt_right.clear();
int N = 0;
for( i = 0; i < (int)std::min(imagePoints1.size(), imgpt0.size()); i++ )
            if( !imagePoints1[i].empty() && !imgpt0[i].empty() )
{
imgpt.push_back(imagePoints1[i]);
imgpt_right.push_back(imgpt0[i]);
N += (int)imgpt0[i].size();
}
if( imgpt.size() < 3 )
{<|fim_middle|> vector<vector<Point2f> > imgpt;
calcChessboardCorners(boardSize, squareSize, objpt[0]);
vector<Mat> rvecs, tvecs;
for( c = 1; c <= 3; c++ )
{
const vector<vector<Point2f> >& imgpt0 = c == 1 ? imagePoints1 : c == 2 ? imagePoints2 : imagePoints3;
imgpt.clear();
int N = 0;
for( i = 0; i < (int)imgpt0.size(); i++ )
|
"Usage: %s\n"
" -w=<board_width> # the number of inner corners per one of board dimension\n"
" -h=<board_height> # the number of inner corners per another board dimension\n"
" [-s=<squareSize>] # square size in some user-defined units (1 by default)\n"
" [-o=<out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
" [-zt] # assume zero tangential distortion\n"
" [-a=<aspectRatio>] # fix aspect ratio (fx/fy)\n"
" [-p] # fix the principal point at the center\n"
" [input_data] # input data - text file with a list of the images of the board\n"
"\n", argv[0] );
}
static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
{
corners.resize(0);
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float(j*squareSize),
float(i*squareSize), 0));
}
static bool run3Calibration(vector<vector<Point2f> > imagePoints1,
vector<vector<Point2f> > imagePoints2,
vector<vector<Point2f> > imagePoints3,
Size imageSize, Size boardSize,
float squareSize, float aspectRatio,
int flags,
Mat& cameraMatrix1, Mat& distCoeffs1,
Mat& cameraMatrix2, Mat& distCoeffs2,
Mat& cameraMatrix3, Mat& distCoeffs3,
Mat& R12, Mat& T12, Mat& R13, Mat& T13)
{
int c, i;
// step 1: calibrate each camera individually
vector<vector<Point3f> > objpt(1);
|
vector<vector<Point2f> > imgpt;
calcChessboardCorners(boardSize, squareSize, objpt[0]);
vector<Mat> rvecs, tvecs;
for( c = 1; c <= 3; c++ )
{
const vector<vector<Point2f> >& imgpt0 = c == 1 ? imagePoints1 : c == 2 ? imagePoints2 : imagePoints3;
imgpt.clear();
int N = 0;
for( i = 0; i < (int)imgpt0.size(); i++ )
|
if( !imgpt0[i].empty() )
{
imgpt.push_back(imgpt0[i]);
N += (int)imgpt0[i].size();
}
if( imgpt.size() < 3 )
{
printf("Error: not enough views for camera %d\n", c);
return false;
}
objpt.resize(imgpt.size(),objpt[0]);
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
if( flags & CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = aspectRatio;
Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
double err = calibrateCamera(objpt, imgpt, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs,
flags|CALIB_FIX_K3/*|CALIB_FIX_K4|CALIB_FIX_K5|CALIB_FIX_K6*/);
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
if(!ok)
{
printf("Error: camera %d was not calibrated\n", c);
return false;
}
printf("Camera %d calibration reprojection error = %g\n", c, sqrt(err/N));
if( c == 1 )
cameraMatrix1 = cameraMatrix, distCoeffs1 = distCoeffs;
else if( c == 2 )
cameraMatrix2 = cameraMatrix, distCoeffs2 = distCoeffs;
else
cameraMatrix3 = cameraMatrix, distCoeffs3 = distCoeffs;
}
vector<vector<Point2f> > imgpt_right;
// step 2: calibrate (1,2) and (3,2) pairs
for( c = 2; c <= 3; c++ )
{
const vector<vector<Point2f> >& imgpt0 = c == 2 ? imagePoints2 : imagePoints3;
imgpt.clear();
imgpt_right.clear();
int N = 0;
for( i = 0; i < (int)std::min(imagePoints1.size(), imgpt0.size()); i++ )
            if( !imagePoints1[i].empty() && !imgpt0[i].empty() )
{
imgpt.push_back(imagePoints1[i]);
imgpt_right.push_back(imgpt0[i]);
N += (int)imgpt0[i].size();
}
if( imgpt.size() < 3 )
{
|
random
|
<|fim_prefix|> ASSERT_HOST(false);
default:
break;
}
const std::unique_ptr<const char[]> para_text(it->GetUTF8Text(RIL_PARA));
text += para_text.get();
} while (it->Next(RIL_PARA));
return copy_string(text);
}
static void AddBoxToTSV(const PageIterator *it, PageIteratorLevel level, std::string &text) {
int left, top, right, bottom;
it->BoundingBox(level, &left, &top, &right, &bottom);
text += "\t" + std::to_string(left);
text += "\t" + std::to_string(top);
text += "\t" + std::to_string(right - left);
text += "\t" + std::to_string(bottom - top);
}
/**
* Make a TSV-formatted string from the internal data structures.
* page_number is 0-based but will appear in the output as 1-based.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetTSVText(int page_number) {
if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(nullptr) < 0)) {
return nullptr;
}
#if !defined(NDEBUG)
int lcnt = 1, bcnt = 1, pcnt = 1, wcnt = 1;
#endif
int page_id = page_number + 1; // we use 1-based page numbers.
int page_num = page_id;
int block_num = 0;
int par_num = 0;
int line_num = 0;
int word_num = 0;
std::string tsv_str;
tsv_str += "1\t" + std::to_string(page_num); // level 1 - page
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(rect_left_);
tsv_str += "\t" + std::to_string(rect_top_);
tsv_str += "\t" + std::to_string(rect_width_);
tsv_str += "\t" + std::to_string(rect_height_);
tsv_str += "\t-1\t\n";
const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
}
// Add rows for any new block/paragraph/textline.
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
<|fim_suffix|>
line_num = 0;
word_num = 0;
tsv_str += "2\t" + std::to_string(page_num); // level 2 - block
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for block
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
par_num++;
line_num = 0;
word_num = 0;
tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for para
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
line_num++;
word_num = 0;
tsv_str += "4\t" + std::to_string(page_num); // level 4 - line
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_TEXTLINE, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for line
}
// Now, process the word...
int left, top, right, bottom;
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
word_num++;
tsv_str += "5\t" + std::to_string(page_num); // level 5 - word
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(left);
tsv_str += "\t" + std::to_string(top);
tsv_str += "\t" + std::to_string(right - left);
tsv_str += "\t" + std::to_string(bottom - top);
tsv_str += "\t" + s<|fim_middle|>block_num++;
par_num = 0;
|
ASSERT_HOST(false);
default:
break;
}
const std::unique_ptr<const char[]> para_text(it->GetUTF8Text(RIL_PARA));
text += para_text.get();
} while (it->Next(RIL_PARA));
return copy_string(text);
}
static void AddBoxToTSV(const PageIterator *it, PageIteratorLevel level, std::string &text) {
int left, top, right, bottom;
it->BoundingBox(level, &left, &top, &right, &bottom);
text += "\t" + std::to_string(left);
text += "\t" + std::to_string(top);
text += "\t" + std::to_string(right - left);
text += "\t" + std::to_string(bottom - top);
}
/**
* Make a TSV-formatted string from the internal data structures.
* page_number is 0-based but will appear in the output as 1-based.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetTSVText(int page_number) {
if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(nullptr) < 0)) {
return nullptr;
}
#if !defined(NDEBUG)
int lcnt = 1, bcnt = 1, pcnt = 1, wcnt = 1;
#endif
int page_id = page_number + 1; // we use 1-based page numbers.
int page_num = page_id;
int block_num = 0;
int par_num = 0;
int line_num = 0;
int word_num = 0;
std::string tsv_str;
tsv_str += "1\t" + std::to_string(page_num); // level 1 - page
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(rect_left_);
tsv_str += "\t" + std::to_string(rect_top_);
tsv_str += "\t" + std::to_string(rect_width_);
tsv_str += "\t" + std::to_string(rect_height_);
tsv_str += "\t-1\t\n";
const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
}
// Add rows for any new block/paragraph/textline.
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
|
block_num++;
par_num = 0;
|
line_num = 0;
word_num = 0;
tsv_str += "2\t" + std::to_string(page_num); // level 2 - block
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for block
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
par_num++;
line_num = 0;
word_num = 0;
tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for para
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
line_num++;
word_num = 0;
tsv_str += "4\t" + std::to_string(page_num); // level 4 - line
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_TEXTLINE, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for line
}
// Now, process the word...
int left, top, right, bottom;
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
word_num++;
tsv_str += "5\t" + std::to_string(page_num); // level 5 - word
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(left);
tsv_str += "\t" + std::to_string(top);
tsv_str += "\t" + std::to_string(right - left);
tsv_str += "\t" + std::to_string(bottom - top);
tsv_str += "\t" + s
|
ast_based
|
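The GetTSVText() contract described in the row above (0-based page_number printed as 1-based, result owned by the caller) is easiest to see in use; this is a hypothetical sketch, not part of the original source, and assumes a TessBaseAPI that has already been given an image:
#include <cstdio>
#include <tesseract/baseapi.h>

static void dump_tsv(tesseract::TessBaseAPI &api) {
  char *tsv = api.GetTSVText(0);  // 0-based page index; emitted as page "1" in the TSV
  if (tsv != nullptr) {
    fputs(tsv, stdout);
    delete[] tsv;                 // the returned string must be freed with delete[]
  }
}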
<|fim_prefix|>e ALTO XML for the end of the document
///
bool TessAltoRenderer::EndDocumentHandler() {
AppendString("\t</Layout>\n</alto>\n");
return true;
}
TessAltoRenderer::TessAltoRenderer(const char *outputbase)
: TessResultRenderer(outputbase, "xml"),
begin_document(false) {}
///
/// Make an XML-formatted string with ALTO markup from the internal
/// data structures.
///
char *TessBaseAPI::GetAltoText(int page_number) {
return GetAltoText(nullptr, page_number);
}
///
/// Make an XML-formatted string with ALTO markup from the internal
/// data structures.
///
char *TessBaseAPI::GetAltoText(ETEXT_DESC *monitor, int page_number) {
if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(monitor) < 0)) {
return nullptr;
}
int lcnt = 0, tcnt = 0, bcnt = 0, wcnt = 0;
if (input_file_.empty()) {
SetInputName(nullptr);
}
std::stringstream alto_str;
// Use "C" locale (needed for int values larger than 999).
alto_str.imbue(std::locale::classic());
alto_str << "\t\t<Page WIDTH=\"" << rect_width_ << "\" HEIGHT=\"" << rect_height_
<< "\" PHYSICAL_IMG_NR=\"" << page_number << "\""
<< " ID=\"page_" << page_number << "\">\n"
<< "\t\t\t<PrintSpace HPOS=\"0\" VPOS=\"0\""
<< " WIDTH=\"" << rect_width_ << "\""
<< " HEIGHT=\"" << rect_height_ << "\">\n";
std::unique_ptr<ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
}
int left, top, right, bottom;
auto block_type = res_it->BlockType();
switch (block_type) {
case PT_FLOWING_IMAGE:
case PT_HEADING_IMAGE:
case PT_PULLOUT_IMAGE: {
// Handle all kinds of images.
// TODO: optionally add TYPE, for example TYPE="photo".
alto_str << "\t\t\t\t<Illustration ID=\"" << GetID("cblock", page_number, bcnt++) << "\"";
AddBoxToAlto(res_it.get(), RIL_BLOCK, alto_str);
<|fim_suffix|>
}
case PT_HORZ_LINE:
case PT_VERT_LINE:
// Handle horizontal and vertical lines.
alto_str << "\t\t\t\t<GraphicalElement ID=\"" << GetID("cblock", page_number, bcnt++) << "\"";
AddBoxToAlto(res_it.get(), RIL_BLOCK, alto_str);
alto_str << "</GraphicalElement >\n";
res_it->Next(RIL_BLOCK);
continue;
case PT_NOISE:
tprintf("TODO: Please report image which triggers the noise case.\n");
ASSERT_HOST(false);
default:
break;
}
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
alto_str << "\t\t\t\t<ComposedBlock ID=\"" << GetID("cblock", page_number, bcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_BLOCK, alto_str);
alto_str << "\n";
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
alto_str << "\t\t\t\t\t<TextBlock ID=\"" << GetID("block", page_number, tcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_PARA, alto_str);
alto_str << "\n";
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
alto_str << "\t\t\t\t\t\t<TextLine ID=\"" << GetID("line", page_number, lcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_TEXTLINE, alto_str);
alto_str << "\n";
}
alto_str << "\t\t\t\t\t\t\t<String ID=\"" << GetID("string", page_number, wcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_WORD, alto_str);
alto_str << " CONTENT=\"";
bool last_word_in_line = res_it->IsAtFinalElement(RIL_TEXTLINE, RIL_WORD);
bool last_word_in_tblock = res_it->IsAtFinalElement(RIL_PARA, RIL_WORD);
bool last_word_in_cblock = res_it->IsAtFinalElement(RIL_BLOCK, RIL_WORD);
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
do {
const std::unique_ptr<const char[]> grapheme(res_it->GetUTF8Text(RIL_SYMBOL));
if (grapheme && grapheme[0] != 0) {
alto_str << HOcrEscape(grapheme.get()).c_str();
}
res_it->Next(RIL_SYMBOL);
} while (!res_it->Empty(RIL_BLOCK) && !res_it->IsAtBeginningOf(RIL_WORD));<|fim_middle|>alto_str << "</Illustration>\n";
res_it->Next(RIL_BLOCK);
continue;
|
e ALTO XML for the end of the document
///
bool TessAltoRenderer::EndDocumentHandler() {
AppendString("\t</Layout>\n</alto>\n");
return true;
}
TessAltoRenderer::TessAltoRenderer(const char *outputbase)
: TessResultRenderer(outputbase, "xml"),
begin_document(false) {}
///
/// Make an XML-formatted string with ALTO markup from the internal
/// data structures.
///
char *TessBaseAPI::GetAltoText(int page_number) {
return GetAltoText(nullptr, page_number);
}
///
/// Make an XML-formatted string with ALTO markup from the internal
/// data structures.
///
char *TessBaseAPI::GetAltoText(ETEXT_DESC *monitor, int page_number) {
if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(monitor) < 0)) {
return nullptr;
}
int lcnt = 0, tcnt = 0, bcnt = 0, wcnt = 0;
if (input_file_.empty()) {
SetInputName(nullptr);
}
std::stringstream alto_str;
// Use "C" locale (needed for int values larger than 999).
alto_str.imbue(std::locale::classic());
alto_str << "\t\t<Page WIDTH=\"" << rect_width_ << "\" HEIGHT=\"" << rect_height_
<< "\" PHYSICAL_IMG_NR=\"" << page_number << "\""
<< " ID=\"page_" << page_number << "\">\n"
<< "\t\t\t<PrintSpace HPOS=\"0\" VPOS=\"0\""
<< " WIDTH=\"" << rect_width_ << "\""
<< " HEIGHT=\"" << rect_height_ << "\">\n";
std::unique_ptr<ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
}
int left, top, right, bottom;
auto block_type = res_it->BlockType();
switch (block_type) {
case PT_FLOWING_IMAGE:
case PT_HEADING_IMAGE:
case PT_PULLOUT_IMAGE: {
// Handle all kinds of images.
// TODO: optionally add TYPE, for example TYPE="photo".
alto_str << "\t\t\t\t<Illustration ID=\"" << GetID("cblock", page_number, bcnt++) << "\"";
AddBoxToAlto(res_it.get(), RIL_BLOCK, alto_str);
|
alto_str << "</Illustration>\n";
res_it->Next(RIL_BLOCK);
continue;
|
}
case PT_HORZ_LINE:
case PT_VERT_LINE:
// Handle horizontal and vertical lines.
alto_str << "\t\t\t\t<GraphicalElement ID=\"" << GetID("cblock", page_number, bcnt++) << "\"";
AddBoxToAlto(res_it.get(), RIL_BLOCK, alto_str);
alto_str << "</GraphicalElement >\n";
res_it->Next(RIL_BLOCK);
continue;
case PT_NOISE:
tprintf("TODO: Please report image which triggers the noise case.\n");
ASSERT_HOST(false);
default:
break;
}
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
alto_str << "\t\t\t\t<ComposedBlock ID=\"" << GetID("cblock", page_number, bcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_BLOCK, alto_str);
alto_str << "\n";
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
alto_str << "\t\t\t\t\t<TextBlock ID=\"" << GetID("block", page_number, tcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_PARA, alto_str);
alto_str << "\n";
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
alto_str << "\t\t\t\t\t\t<TextLine ID=\"" << GetID("line", page_number, lcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_TEXTLINE, alto_str);
alto_str << "\n";
}
alto_str << "\t\t\t\t\t\t\t<String ID=\"" << GetID("string", page_number, wcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_WORD, alto_str);
alto_str << " CONTENT=\"";
bool last_word_in_line = res_it->IsAtFinalElement(RIL_TEXTLINE, RIL_WORD);
bool last_word_in_tblock = res_it->IsAtFinalElement(RIL_PARA, RIL_WORD);
bool last_word_in_cblock = res_it->IsAtFinalElement(RIL_BLOCK, RIL_WORD);
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
do {
const std::unique_ptr<const char[]> grapheme(res_it->GetUTF8Text(RIL_SYMBOL));
if (grapheme && grapheme[0] != 0) {
alto_str << HOcrEscape(grapheme.get()).c_str();
}
res_it->Next(RIL_SYMBOL);
} while (!res_it->Empty(RIL_BLOCK) && !res_it->IsAtBeginningOf(RIL_WORD));
|
ast_based
|
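As a counterpart to the GetAltoText() accessors in the row above, the TessAltoRenderer shown there can write the same ALTO markup straight to disk; the following sketch is hypothetical (file names and output base are made up) and assumes an initialized TessBaseAPI:
#include <tesseract/baseapi.h>
#include <tesseract/renderer.h>

static bool write_alto(tesseract::TessBaseAPI &api) {
  tesseract::TessAltoRenderer renderer("out");  // writes "out.xml" (extension set in the constructor above)
  return api.ProcessPages("page.tif", nullptr, 0, &renderer);
}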
<|fim_prefix|> const std::string & name = it.first;
llama_adapter_lora_weight & w = it.second;
bool is_token_embd = str_endswith(name, "token_embd.weight");
if (!w.a || !w.b) {
throw std::runtime_error("LoRA tensor pair for '" + name + "' is missing one component");
}
// device buft and device ctx
const auto * model_tensor = model.get_tensor(name.c_str());
if (!model_tensor) {
throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model (hint: maybe wrong base model?)");
}
auto * buft = ggml_backend_buffer_get_type(model_tensor->buffer);
// do not load loras to extra buffer types (i.e. bufts for repacking) -> use the CPU in that case
for (auto & ex : buft_extra) {
if (ex == buft) {
LLAMA_LOG_WARN("%s: lora for '%s' cannot use buft '%s', fallback to CPU\n", __func__, model_tensor->name, ggml_backend_buft_name(buft));
auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
if (!cpu_dev) {
throw std::runtime_error(format("%s: no CPU backend found", __func__));
}
buft = ggml_backend_dev_buffer_type(cpu_dev);
break;
}
}
LLAMA_LOG_DEBUG("%s: lora for '%s' -> '%s'\n", __func__, model_tensor->name, ggml_backend_buft_name(buft));
ggml_context * dev_ctx = ctx_for_buft(buft);
// validate tensor shape
if (is_token_embd) {
// expect B to be non-transposed, A and B are flipped; see llm_build_inp_embd()
if (model_tensor->ne[0] != w.b->ne[1] || model_tensor->ne[1] != w.a->ne[1]) {
throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
}
} else {
if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) {<|fim_suffix|> }
if (w.a->ne[1] != w.b->ne[0]) {
throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)");
}
}
// save tensor to adapter
ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a);
ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b);
ggml_set_name(tensor_a, w.a->name);
ggml_set_name(tensor_b, w.b->name);
adapter.ab_map[name] = llama_adapter_lora_weight(tensor_a, tensor_b);
}
// allocate tensors / buffers and zero
{
adapter.ctxs.reserve(ctx_map.size());
adapter.bufs.reserve(ctx_map.size());
for (auto & it : ctx_map) {
ggml_backend_buffer_type_t buft = it.first;
ggml_context * ctx_dev = it.second;
ggml_backend_buffer_ptr buf { ggml_backend_alloc_ctx_tensors_from_buft(ctx_dev, buft) };
if (!buf) {
throw std::runtime_error("failed to allocate buffer for lora adapter\n");
}
LLAMA_LOG_INFO("%s: %10s LoRA buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get())/1024.0/1024.0);
adapter.bufs.emplace_back(std::move(buf));
}
}
// set tensor data
{
llama_file gguf_file(path_lora, "rb");
std::vector<uint8_t> read_buf;
auto set_tensor = [&](ggml_tensor * orig, ggml_tensor * dev) {
size_t offs = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), gguf_find_tensor(ctx_gguf.get(), orig->name));
size_t size = ggml_nbytes(orig);
read_buf.resize(size);
gguf_file.seek(offs, SEEK_SET);
gguf_file.read_raw(read_buf.data(), size);
ggml_backend_tensor_set(dev, read_buf.data(), 0, size);
};
for (auto & it : adapter.ab_map) {
auto orig = ab_map[it.first];<|fim_middle|> throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
|
const std::string & name = it.first;
llama_adapter_lora_weight & w = it.second;
bool is_token_embd = str_endswith(name, "token_embd.weight");
if (!w.a || !w.b) {
throw std::runtime_error("LoRA tensor pair for '" + name + "' is missing one component");
}
// device buft and device ctx
const auto * model_tensor = model.get_tensor(name.c_str());
if (!model_tensor) {
throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model (hint: maybe wrong base model?)");
}
auto * buft = ggml_backend_buffer_get_type(model_tensor->buffer);
// do not load loras to extra buffer types (i.e. bufts for repacking) -> use the CPU in that case
for (auto & ex : buft_extra) {
if (ex == buft) {
LLAMA_LOG_WARN("%s: lora for '%s' cannot use buft '%s', fallback to CPU\n", __func__, model_tensor->name, ggml_backend_buft_name(buft));
auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
if (!cpu_dev) {
throw std::runtime_error(format("%s: no CPU backend found", __func__));
}
buft = ggml_backend_dev_buffer_type(cpu_dev);
break;
}
}
LLAMA_LOG_DEBUG("%s: lora for '%s' -> '%s'\n", __func__, model_tensor->name, ggml_backend_buft_name(buft));
ggml_context * dev_ctx = ctx_for_buft(buft);
// validate tensor shape
if (is_token_embd) {
// expect B to be non-transposed, A and B are flipped; see llm_build_inp_embd()
if (model_tensor->ne[0] != w.b->ne[1] || model_tensor->ne[1] != w.a->ne[1]) {
throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
}
} else {
if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) {
|
throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
|
}
if (w.a->ne[1] != w.b->ne[0]) {
throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)");
}
}
// save tensor to adapter
ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a);
ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b);
ggml_set_name(tensor_a, w.a->name);
ggml_set_name(tensor_b, w.b->name);
adapter.ab_map[name] = llama_adapter_lora_weight(tensor_a, tensor_b);
}
// allocate tensors / buffers and zero
{
adapter.ctxs.reserve(ctx_map.size());
adapter.bufs.reserve(ctx_map.size());
for (auto & it : ctx_map) {
ggml_backend_buffer_type_t buft = it.first;
ggml_context * ctx_dev = it.second;
ggml_backend_buffer_ptr buf { ggml_backend_alloc_ctx_tensors_from_buft(ctx_dev, buft) };
if (!buf) {
throw std::runtime_error("failed to allocate buffer for lora adapter\n");
}
LLAMA_LOG_INFO("%s: %10s LoRA buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get())/1024.0/1024.0);
adapter.bufs.emplace_back(std::move(buf));
}
}
// set tensor data
{
llama_file gguf_file(path_lora, "rb");
std::vector<uint8_t> read_buf;
auto set_tensor = [&](ggml_tensor * orig, ggml_tensor * dev) {
size_t offs = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), gguf_find_tensor(ctx_gguf.get(), orig->name));
size_t size = ggml_nbytes(orig);
read_buf.resize(size);
gguf_file.seek(offs, SEEK_SET);
gguf_file.read_raw(read_buf.data(), size);
ggml_backend_tensor_set(dev, read_buf.data(), 0, size);
};
for (auto & it : adapter.ab_map) {
auto orig = ab_map[it.first];
|
random
|
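Reading the shape checks in the row above literally, a non-embedding LoRA pair has to line up with the base weight as follows; the concrete sizes are hypothetical and only illustrate the ggml ne[] convention implied by those checks:
// base weight W : ne = { n_in, n_out }   e.g. { 4096, 4096 }
// lora_a        : ne = { n_in, r }       e.g. { 4096, 16 }   (shares ne[0] with W)
// lora_b        : ne = { r, n_out }      e.g. { 16, 4096 }   (shares ne[1] with W; ne[0] equals lora_a->ne[1])
// token_embd.weight is the exception: per the first branch, A and B swap roles there.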
<|fim_prefix|>ae->parent = root_rid;
ae->run = Vector3i(range.x, range.y, i);
ae->node = accesskit_node_new(ae->role);
text_elements.push_back(ae);
// UTF-8 text and char lengths.
Vector<uint8_t> char_lengths;
CharString text = t.utf8(&char_lengths);
accesskit_node_set_value(ae->node, text.ptr());
accesskit_node_set_character_lengths(ae->node, char_lengths.size(), char_lengths.ptr());
// Word sizes.
Vector<uint8_t> word_lengths;
int32_t prev = ae->run.x;
int32_t total = 0;
for (int j = 0; j < words.size(); j += 2) {
if (words[j] < ae->run.x) {
continue;
}
if (words[j] >= ae->run.y) {
break;
}
int32_t wlen = words[j] - prev;
while (wlen > 255) {
word_lengths.push_back(255);
wlen -= 255;
total += 255;
}
if (wlen > 0) {
word_lengths.push_back(wlen);
total += wlen;
}
prev = words[j];
}
if (total < t.length()) {
word_lengths.push_back(t.length() - total);
}
accesskit_node_set_word_lengths(ae->node, word_lengths.size(), word_lengths.ptr());
// Char widths and positions.
Vector<float> char_positions;
Vector<float> char_widths;
char_positions.resize_initialized(t.length());
float *positions_ptr = char_positions.ptrw();
char_widths.resize_initialized(t.length());
float *widths_ptr = char_widths.ptrw();
float size_x = 0.0;
for (int j = gl_index; j < gl_count; j += gl[j].count) {
if (gl[j].start >= ae->run.y) {
gl_index = j;
break;
}
 float advance = 0.0; // Grapheme advance.
for (int k = 0; k < gl[j].count; k++) {
advance += gl[j + k].advance;
}
int chars = gl[j].end - gl[j].start;
float adv_per_char = advance / (float)chars;
for (int k = 0; k < chars; k++) {
int index = gl[j].start + k - ae->run.x;
ERR_CONTINUE(index < 0 || index >= t.length());
positions_ptr[index] = size_x + adv_per_char * k;
widths_ptr[index] = adv_per_char;
}
size_x += advance * gl[j].repeat;
}
positions_ptr[t.length() - 1] = size_x;
<|fim_suffix|>
accesskit_node_set_character_positions(ae->node, char_positions.size(), char_positions.ptr());
accesskit_node_set_character_widths(ae->node, char_widths.size(), char_widths.ptr());
RID font_rid = TS->shaped_get_run_font_rid(p_shaped_text, i);
if (font_rid != RID()) {
CharString font_name = TS->font_get_name(font_rid).utf8();
if (font_name.length() > 0) {
accesskit_node_set_font_family(ae->node, font_name.ptr());
}
if (TS->font_get_style(font_rid).has_flag(TextServer::FONT_BOLD)) {
accesskit_node_set_bold(ae->node);
}
if (TS->font_get_style(font_rid).has_flag(TextServer::FONT_ITALIC)) {
accesskit_node_set_italic(ae->node);
}
accesskit_node_set_font_weight(ae->node, TS->font_get_weight(font_rid));
}
accesskit_node_set_font_size(ae->node, TS->shaped_get_run_font_size(p_shaped_text, i));
CharString language = TS->shaped_get_run_language(p_shaped_text, i).utf8();
if (language.length() > 0) {
accesskit_node_set_language(ae->node, language.ptr());
}
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT);
accesskit_rect rect;
rect.x0 = run_off_x;
rect.y0 = 0;
rect.x1 = run_off_x + size_x;
rect.y1 = text_height;
accesskit_node_set_bounds(ae->node, rect);
accesskit_node_add_action(ae->node, ACCESSKIT_ACTION_SCROLL_INTO_VIEW);
run_off_x += size_x;
}
{
// Add "\n" at the end.
AccessibilityElement *ae = memnew(AccessibilityElement);
ae->role = ACCESSKIT_ROLE_TEXT_RUN;
ae->window_id = parent_ae->window_id;
ae->parent = root_rid;
ae->run = Vector3i(full_range.y, full_range.y, run_count);
ae->node = accesskit_node_new(ae->role);
text_elements.push_back(ae);
Vector<uint8_t> char_lengths;
char_lengths.push_back(1);
accesskit_node_set_value(ae->node, "\n");
accesskit_node_set_character_lengths(ae->node, char_lengths.size(), char_lengths.ptr());
Vector<float> char_positions;
Vector<float> char_widths;
char_positions.push_back(0.0);
char_width<|fim_middle|>widths_ptr[t.length() - 1] = 1.0;
|
ae->parent = root_rid;
ae->run = Vector3i(range.x, range.y, i);
ae->node = accesskit_node_new(ae->role);
text_elements.push_back(ae);
// UTF-8 text and char lengths.
Vector<uint8_t> char_lengths;
CharString text = t.utf8(&char_lengths);
accesskit_node_set_value(ae->node, text.ptr());
accesskit_node_set_character_lengths(ae->node, char_lengths.size(), char_lengths.ptr());
// Word sizes.
Vector<uint8_t> word_lengths;
int32_t prev = ae->run.x;
int32_t total = 0;
for (int j = 0; j < words.size(); j += 2) {
if (words[j] < ae->run.x) {
continue;
}
if (words[j] >= ae->run.y) {
break;
}
int32_t wlen = words[j] - prev;
while (wlen > 255) {
word_lengths.push_back(255);
wlen -= 255;
total += 255;
}
if (wlen > 0) {
word_lengths.push_back(wlen);
total += wlen;
}
prev = words[j];
}
if (total < t.length()) {
word_lengths.push_back(t.length() - total);
}
accesskit_node_set_word_lengths(ae->node, word_lengths.size(), word_lengths.ptr());
// Char widths and positions.
Vector<float> char_positions;
Vector<float> char_widths;
char_positions.resize_initialized(t.length());
float *positions_ptr = char_positions.ptrw();
char_widths.resize_initialized(t.length());
float *widths_ptr = char_widths.ptrw();
float size_x = 0.0;
for (int j = gl_index; j < gl_count; j += gl[j].count) {
if (gl[j].start >= ae->run.y) {
gl_index = j;
break;
}
 float advance = 0.0; // Grapheme advance.
for (int k = 0; k < gl[j].count; k++) {
advance += gl[j + k].advance;
}
int chars = gl[j].end - gl[j].start;
float adv_per_char = advance / (float)chars;
for (int k = 0; k < chars; k++) {
int index = gl[j].start + k - ae->run.x;
ERR_CONTINUE(index < 0 || index >= t.length());
positions_ptr[index] = size_x + adv_per_char * k;
widths_ptr[index] = adv_per_char;
}
size_x += advance * gl[j].repeat;
}
positions_ptr[t.length() - 1] = size_x;
|
widths_ptr[t.length() - 1] = 1.0;
|
accesskit_node_set_character_positions(ae->node, char_positions.size(), char_positions.ptr());
accesskit_node_set_character_widths(ae->node, char_widths.size(), char_widths.ptr());
RID font_rid = TS->shaped_get_run_font_rid(p_shaped_text, i);
if (font_rid != RID()) {
CharString font_name = TS->font_get_name(font_rid).utf8();
if (font_name.length() > 0) {
accesskit_node_set_font_family(ae->node, font_name.ptr());
}
if (TS->font_get_style(font_rid).has_flag(TextServer::FONT_BOLD)) {
accesskit_node_set_bold(ae->node);
}
if (TS->font_get_style(font_rid).has_flag(TextServer::FONT_ITALIC)) {
accesskit_node_set_italic(ae->node);
}
accesskit_node_set_font_weight(ae->node, TS->font_get_weight(font_rid));
}
accesskit_node_set_font_size(ae->node, TS->shaped_get_run_font_size(p_shaped_text, i));
CharString language = TS->shaped_get_run_language(p_shaped_text, i).utf8();
if (language.length() > 0) {
accesskit_node_set_language(ae->node, language.ptr());
}
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT);
accesskit_rect rect;
rect.x0 = run_off_x;
rect.y0 = 0;
rect.x1 = run_off_x + size_x;
rect.y1 = text_height;
accesskit_node_set_bounds(ae->node, rect);
accesskit_node_add_action(ae->node, ACCESSKIT_ACTION_SCROLL_INTO_VIEW);
run_off_x += size_x;
}
{
// Add "\n" at the end.
AccessibilityElement *ae = memnew(AccessibilityElement);
ae->role = ACCESSKIT_ROLE_TEXT_RUN;
ae->window_id = parent_ae->window_id;
ae->parent = root_rid;
ae->run = Vector3i(full_range.y, full_range.y, run_count);
ae->node = accesskit_node_new(ae->role);
text_elements.push_back(ae);
Vector<uint8_t> char_lengths;
char_lengths.push_back(1);
accesskit_node_set_value(ae->node, "\n");
accesskit_node_set_character_lengths(ae->node, char_lengths.size(), char_lengths.ptr());
Vector<float> char_positions;
Vector<float> char_widths;
char_positions.push_back(0.0);
char_width
|
ast_based
|
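The character-position loop in the row above spreads each glyph cluster's advance evenly over the characters it covers; a hypothetical cluster makes the arithmetic concrete:
// Suppose a cluster spans chars = 3 characters, its glyphs advance 12 px in total,
// and the run has already accumulated size_x = 100:
//   adv_per_char = 12 / 3 = 4
//   positions    = { 100, 104, 108 }   // size_x + adv_per_char * k
//   widths       = { 4, 4, 4 }         // adv_per_char for every character
// size_x then grows by advance * gl[j].repeat before the next cluster.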
<|fim_prefix|>il_end) {
const auto & hparams = model.hparams;
if (data == nullptr) {
// disable the current control vector (but leave allocated for later)
layer_start = -1;
layer_end = -1;
return true;
}
if (n_embd != (int) hparams.n_embd) {
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
return false;
}
if (tensors.empty()) {
if (!init(model)) {
return false;
}
}
layer_start = il_start;
layer_end = il_end;
for (size_t il = 1; il < hparams.n_layer; il++) {
assert(tensors[il] != nullptr);
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
if (off + n_embd <= len) {
ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
}
}
return true;
}
// lora
llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) {
const std::string name(w->name);
const auto pos = ab_map.find(name);
if (pos != ab_map.end()) {
return &pos->second;
}
return nullptr;
}
static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
ggml_context * ctx_init;
gguf_init_params meta_gguf_params = {
/* .no_alloc = */ true,
/* .ctx = */ &ctx_init,
};
gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
if (!ctx_gguf) {
throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
}
ggml_context_ptr ctx { ctx_init };
// check metadata
{
const gguf_context * gguf_ctx = ctx_gguf.get();
LLAMA_LOG_INFO("%s: Dumping metadata keys/values.\n", __func__);
// get metadata as string
for (<|fim_suffix|> i < gguf_get_n_kv(gguf_ctx); i++) {
gguf_type type = gguf_get_kv_type(gguf_ctx, i);
const std::string type_name =
type == GGUF_TYPE_ARRAY
? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(gguf_ctx, i)), gguf_get_arr_n(gguf_ctx, i))
: gguf_type_name(type);
const char * name = gguf_get_key(gguf_ctx, i);
const std::string value = gguf_kv_to_str(gguf_ctx, i);
if (type != GGUF_TYPE_ARRAY) {
adapter.gguf_kv.emplace(name, value);
}
const size_t MAX_VALUE_LEN = 40;
std::string print_value = value.size() > MAX_VALUE_LEN ? format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str()) : value;
replace_all(print_value, "\n", "\\n");
LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), print_value.c_str());
}
auto get_kv_str = [&](const std::string & key) -> std::string {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? "" : std::string(gguf_get_val_str(gguf_ctx, id));
};
auto get_kv_f32 = [&](const std::string & key) -> float {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? 0.0f : gguf_get_val_f32(gguf_ctx, id);
};
LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
auto general_type = get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE));
if (general_type != "adapter") {
throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
}
auto general_arch_str = get_kv_str(llm_kv(LLM_KV_GENERAL_ARCHITECTURE));
auto general_arch = llm_arch_from_string(general_arch_str);
if (general_arch != model.arch) {
throw std::runtime_error("model arch and LoRA arch mismatch");
}
auto adapter_type = get_kv_str(llm_kv(LLM_KV_ADAPTER_<|fim_middle|>int i = 0;
|
il_end) {
const auto & hparams = model.hparams;
if (data == nullptr) {
// disable the current control vector (but leave allocated for later)
layer_start = -1;
layer_end = -1;
return true;
}
if (n_embd != (int) hparams.n_embd) {
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
return false;
}
if (tensors.empty()) {
if (!init(model)) {
return false;
}
}
layer_start = il_start;
layer_end = il_end;
for (size_t il = 1; il < hparams.n_layer; il++) {
assert(tensors[il] != nullptr);
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
if (off + n_embd <= len) {
ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
}
}
return true;
}
// lora
llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) {
const std::string name(w->name);
const auto pos = ab_map.find(name);
if (pos != ab_map.end()) {
return &pos->second;
}
return nullptr;
}
static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
ggml_context * ctx_init;
gguf_init_params meta_gguf_params = {
/* .no_alloc = */ true,
/* .ctx = */ &ctx_init,
};
gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
if (!ctx_gguf) {
throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
}
ggml_context_ptr ctx { ctx_init };
// check metadata
{
const gguf_context * gguf_ctx = ctx_gguf.get();
LLAMA_LOG_INFO("%s: Dumping metadata keys/values.\n", __func__);
// get metadata as string
for (
|
int i = 0;
|
i < gguf_get_n_kv(gguf_ctx); i++) {
gguf_type type = gguf_get_kv_type(gguf_ctx, i);
const std::string type_name =
type == GGUF_TYPE_ARRAY
? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(gguf_ctx, i)), gguf_get_arr_n(gguf_ctx, i))
: gguf_type_name(type);
const char * name = gguf_get_key(gguf_ctx, i);
const std::string value = gguf_kv_to_str(gguf_ctx, i);
if (type != GGUF_TYPE_ARRAY) {
adapter.gguf_kv.emplace(name, value);
}
const size_t MAX_VALUE_LEN = 40;
std::string print_value = value.size() > MAX_VALUE_LEN ? format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str()) : value;
replace_all(print_value, "\n", "\\n");
LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), print_value.c_str());
}
auto get_kv_str = [&](const std::string & key) -> std::string {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? "" : std::string(gguf_get_val_str(gguf_ctx, id));
};
auto get_kv_f32 = [&](const std::string & key) -> float {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? 0.0f : gguf_get_val_f32(gguf_ctx, id);
};
LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
auto general_type = get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE));
if (general_type != "adapter") {
throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
}
auto general_arch_str = get_kv_str(llm_kv(LLM_KV_GENERAL_ARCHITECTURE));
auto general_arch = llm_arch_from_string(general_arch_str);
if (general_arch != model.arch) {
throw std::runtime_error("model arch and LoRA arch mismatch");
}
auto adapter_type = get_kv_str(llm_kv(LLM_KV_ADAPTER_
|
ast_based
|
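The fragment above validates a LoRA adapter by walking the GGUF key/value metadata before any tensors are loaded. Below is a minimal standalone sketch of that metadata walk, using only the gguf_* calls that already appear above (gguf_init_from_file, gguf_get_n_kv, gguf_get_key, gguf_find_key, gguf_get_val_str); the header names, the gguf_free/ggml_free cleanup, and the print_adapter_type helper are assumptions for illustration, not taken from the fragment.

#include <cstdio>
#include <string>

#include "ggml.h" // assumed: declares ggml_context / ggml_free
#include "gguf.h" // assumed: declares the gguf_* C API used below

// Hypothetical helper: returns true if the file's general.type metadata is "adapter".
static bool print_adapter_type(const char * path) {
    ggml_context * ctx_meta = nullptr;
    gguf_init_params params = {
        /* .no_alloc = */ true,   // metadata only, do not allocate tensor data
        /* .ctx      = */ &ctx_meta,
    };
    gguf_context * gguf = gguf_init_from_file(path, params);
    if (gguf == nullptr) {
        return false;
    }
    // Walk every key/value pair, mirroring the metadata dump in the fragment above.
    for (int i = 0; i < (int) gguf_get_n_kv(gguf); i++) {
        printf("kv %3d: %s\n", i, gguf_get_key(gguf, i));
    }
    // Look up a single key the same way the get_kv_str lambda does.
    const int id = gguf_find_key(gguf, "general.type");
    const std::string general_type = id < 0 ? "" : gguf_get_val_str(gguf, id);
    printf("general.type = %s\n", general_type.c_str());
    gguf_free(gguf);          // assumed cleanup
    if (ctx_meta != nullptr) {
        ggml_free(ctx_meta);  // assumed cleanup
    }
    return general_type == "adapter";
}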
<|fim_prefix|> if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
//selection.clear();
}
}
void AnimationBezierTrackEdit::_bezier_track_insert_key_at_anim(const Ref<Animation> &p_anim, int p_track, double p_time, real_t p_value, const Vector2 &p_in_handle, const Vector2 &p_out_handle, const Animation::HandleMode p_handle_mode, Animation::HandleSetMode p_handle_set_mode) {
int idx = p_anim->bezier_track_insert_key(p_track, p_time, p_value, p_in_handle, p_out_handle);
p_anim->bezier_track_set_key_handle_mode(p_track, idx, p_handle_mode, p_handle_set_mode);
}
void AnimationBezierTrackEdit::_bind_methods() {
ClassDB::bind_method(D_METHOD("_clear_selection"), &AnimationBezierTrackEdit::_clear_selection);
ClassDB::bind_method(D_METHOD("_clear_selection_for_anim"), &AnimationBezierTrackEdit::_clear_selection_for_anim);
ClassDB::bind_method(D_METHOD("_select_at_anim"), &AnimationBezierTrackEdit::_select_at_anim);
ClassDB::bind_method(D_METHOD("_update_hidden_tracks_after"), &AnimationBezierTrackEdit::_update_hidden_tracks_after);
ClassDB::bind_method(D_METHOD("_update_locked_tracks_after"), &AnimationBezierTrackEdit::_update_locked_tracks_after);
ClassDB::bind_method(D_METHOD("_bezier_track_insert_key_at_anim"), &AnimationBezierTrackEdit::_bezier_track_insert_key_at_anim, DEFVAL(Animation::HANDLE_SET_MODE_NONE));
ADD_SIGNAL(MethodInfo("select_key", PropertyInfo(Variant::INT, "index"), PropertyInfo(Variant::BOOL, "single"), PropertyInfo(Variant::INT, "track")));
ADD_SIGNAL(MethodInfo("deselect_key", PropertyInfo(Variant::INT, "index"), PropertyInfo(Variant::INT, "track")));
ADD_SIGNAL(MethodInfo("clear_selection"));
ADD_SIGNAL(MethodInfo("timeline_changed", PropertyInfo(Variant::FLOAT, "position"), PropertyInfo(Variant::BOOL, "timeline_only")));
}
AnimationBezierTrackEdit::AnimationBezierTrackEdit() {<|fim_suffix|> play_position->set_anchors_and_offsets_preset(PRESET_FULL_RECT);
play_position->connect(SceneStringName(draw), callable_mp(this, &AnimationBezierTrackEdit::_play_position_draw));
set_focus_mode(FOCUS_CLICK);
set_clip_contents(true);
ED_SHORTCUT("animation_bezier_editor/focus", TTRC("Focus"), Key::F);
ED_SHORTCUT("animation_bezier_editor/select_all_keys", TTRC("Select All Keys"), KeyModifierMask::CMD_OR_CTRL | Key::A);
ED_SHORTCUT("animation_bezier_editor/deselect_all_keys", TTRC("Deselect All Keys"), KeyModifierMask::CMD_OR_CTRL | KeyModifierMask::SHIFT | Key::A);
menu = memnew(PopupMenu);
add_child(menu);
menu->connect(SceneStringName(id_pressed), callable_mp(this, &AnimationBezierTrackEdit::_menu_selected));
}
<|fim_middle|> panner.instantiate();
panner->set_callbacks(callable_mp(this, &AnimationBezierTrackEdit::_pan_callback), callable_mp(this, &AnimationBezierTrackEdit::_zoom_callback));
play_position = memnew(Control);
play_position->set_mouse_filter(MOUSE_FILTER_PASS);
add_child(play_position);
|
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
//selection.clear();
}
}
void AnimationBezierTrackEdit::_bezier_track_insert_key_at_anim(const Ref<Animation> &p_anim, int p_track, double p_time, real_t p_value, const Vector2 &p_in_handle, const Vector2 &p_out_handle, const Animation::HandleMode p_handle_mode, Animation::HandleSetMode p_handle_set_mode) {
int idx = p_anim->bezier_track_insert_key(p_track, p_time, p_value, p_in_handle, p_out_handle);
p_anim->bezier_track_set_key_handle_mode(p_track, idx, p_handle_mode, p_handle_set_mode);
}
void AnimationBezierTrackEdit::_bind_methods() {
ClassDB::bind_method(D_METHOD("_clear_selection"), &AnimationBezierTrackEdit::_clear_selection);
ClassDB::bind_method(D_METHOD("_clear_selection_for_anim"), &AnimationBezierTrackEdit::_clear_selection_for_anim);
ClassDB::bind_method(D_METHOD("_select_at_anim"), &AnimationBezierTrackEdit::_select_at_anim);
ClassDB::bind_method(D_METHOD("_update_hidden_tracks_after"), &AnimationBezierTrackEdit::_update_hidden_tracks_after);
ClassDB::bind_method(D_METHOD("_update_locked_tracks_after"), &AnimationBezierTrackEdit::_update_locked_tracks_after);
ClassDB::bind_method(D_METHOD("_bezier_track_insert_key_at_anim"), &AnimationBezierTrackEdit::_bezier_track_insert_key_at_anim, DEFVAL(Animation::HANDLE_SET_MODE_NONE));
ADD_SIGNAL(MethodInfo("select_key", PropertyInfo(Variant::INT, "index"), PropertyInfo(Variant::BOOL, "single"), PropertyInfo(Variant::INT, "track")));
ADD_SIGNAL(MethodInfo("deselect_key", PropertyInfo(Variant::INT, "index"), PropertyInfo(Variant::INT, "track")));
ADD_SIGNAL(MethodInfo("clear_selection"));
ADD_SIGNAL(MethodInfo("timeline_changed", PropertyInfo(Variant::FLOAT, "position"), PropertyInfo(Variant::BOOL, "timeline_only")));
}
AnimationBezierTrackEdit::AnimationBezierTrackEdit() {
|
panner.instantiate();
panner->set_callbacks(callable_mp(this, &AnimationBezierTrackEdit::_pan_callback), callable_mp(this, &AnimationBezierTrackEdit::_zoom_callback));
play_position = memnew(Control);
play_position->set_mouse_filter(MOUSE_FILTER_PASS);
add_child(play_position);
|
play_position->set_anchors_and_offsets_preset(PRESET_FULL_RECT);
play_position->connect(SceneStringName(draw), callable_mp(this, &AnimationBezierTrackEdit::_play_position_draw));
set_focus_mode(FOCUS_CLICK);
set_clip_contents(true);
ED_SHORTCUT("animation_bezier_editor/focus", TTRC("Focus"), Key::F);
ED_SHORTCUT("animation_bezier_editor/select_all_keys", TTRC("Select All Keys"), KeyModifierMask::CMD_OR_CTRL | Key::A);
ED_SHORTCUT("animation_bezier_editor/deselect_all_keys", TTRC("Deselect All Keys"), KeyModifierMask::CMD_OR_CTRL | KeyModifierMask::SHIFT | Key::A);
menu = memnew(PopupMenu);
add_child(menu);
menu->connect(SceneStringName(id_pressed), callable_mp(this, &AnimationBezierTrackEdit::_menu_selected));
}
|
random
|
<|fim_prefix|>ibDataController::saveCurrentCameraParameters() const
{
for(size_t i = 0; i < mCalibData->allFrames.size(); i++)
cv::imwrite(cv::format("calibration_%zu.png", i), mCalibData->allFrames[i]);
bool success = false;
if(mCalibData->cameraMatrix.total()) {
cv::FileStorage parametersWriter(mParamsFileName, cv::FileStorage::WRITE);
if(parametersWriter.isOpened()) {
time_t rawtime;
time(&rawtime);
char buf[256];
strftime(buf, sizeof(buf)-1, "%c", localtime(&rawtime));
parametersWriter << "calibrationDate" << buf;
parametersWriter << "framesCount" << std::max((int)mCalibData->objectPoints.size(), (int)mCalibData->allCharucoCorners.size());
parametersWriter << "cameraResolution" << mCalibData->imageSize;
parametersWriter << "camera_matrix" << mCalibData->cameraMatrix;
parametersWriter << "camera_matrix_std_dev" << mCalibData->stdDeviations.rowRange(cv::Range(0, 4));
parametersWriter << "distortion_coefficients" << mCalibData->distCoeffs;
parametersWriter << "distortion_coefficients_std_dev" << mCalibData->stdDeviations.rowRange(cv::Range(4, 9));
parametersWriter << "avg_reprojection_error" << mCalibData->totalAvgErr;
parametersWriter.release();
success = true;
}
}
return success;
}
void calib::calibDataController::printParametersToConsole(std::ostream &output) const
{
const char* border = "---------------------------------------------------";
output << border << std::endl;
output << "Frames used for calibration: " << std::max(mCalibData->objectPoints.size(), mCalibData->allCharucoCorners.size())
<< " \t RMS = " << mCalibData->totalAvgErr << std::endl;
if(mCalibData->cameraMatrix.at<double>(0,0) == mCalibData->cameraMatrix.at<double>(1,1))
output << "F = " << <|fim_suffix|> << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
else
output << "Fx = " << mCalibData->cameraMatrix.at<double>(0,0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(0) << " \t "
<< "Fy = " << mCalibData->cameraMatrix.at<double>(1,1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
output << "Cx = " << mCalibData->cameraMatrix.at<double>(0,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(2) << " \t"
<< "Cy = " << mCalibData->cameraMatrix.at<double>(1,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(3) << std::endl;
output << "K1 = " << mCalibData->distCoeffs.at<double>(0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(4) << std::endl;
output << "K2 = " << mCalibData->distCoeffs.at<double>(1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(5) << std::endl;
output << "K3 = " << mCalibData->distCoeffs.at<double>(4) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(8) << std::endl;
output << "TD1 = " << mCalibData->distCoeffs.at<double>(2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(6) << std::endl;
output << "TD2 = " << mCalibData->distCoeffs.at<double>(3) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(7) << std::endl;
}
void calib::calibDataController::updateUndistortMap()
{
cv::initUndistortRectifyMap(mCalibData->cameraMatrix, mCalibData->distCoeffs, cv::noArray(),
cv::getOptimalNewCameraMatrix(mCalibData->cameraMatrix, mCalibData->distCoeffs, mCalibData->imageSize, 0.0, mCalibData->imageSize),
mCalibData->imageSize, CV_16SC2, mCalibData->undistMap1, mCalibData->undistMap2);
}
<|fim_middle|>mCalibData->cameraMatrix.at<double>(1,1)
|
ibDataController::saveCurrentCameraParameters() const
{
for(size_t i = 0; i < mCalibData->allFrames.size(); i++)
cv::imwrite(cv::format("calibration_%zu.png", i), mCalibData->allFrames[i]);
bool success = false;
if(mCalibData->cameraMatrix.total()) {
cv::FileStorage parametersWriter(mParamsFileName, cv::FileStorage::WRITE);
if(parametersWriter.isOpened()) {
time_t rawtime;
time(&rawtime);
char buf[256];
strftime(buf, sizeof(buf)-1, "%c", localtime(&rawtime));
parametersWriter << "calibrationDate" << buf;
parametersWriter << "framesCount" << std::max((int)mCalibData->objectPoints.size(), (int)mCalibData->allCharucoCorners.size());
parametersWriter << "cameraResolution" << mCalibData->imageSize;
parametersWriter << "camera_matrix" << mCalibData->cameraMatrix;
parametersWriter << "camera_matrix_std_dev" << mCalibData->stdDeviations.rowRange(cv::Range(0, 4));
parametersWriter << "distortion_coefficients" << mCalibData->distCoeffs;
parametersWriter << "distortion_coefficients_std_dev" << mCalibData->stdDeviations.rowRange(cv::Range(4, 9));
parametersWriter << "avg_reprojection_error" << mCalibData->totalAvgErr;
parametersWriter.release();
success = true;
}
}
return success;
}
void calib::calibDataController::printParametersToConsole(std::ostream &output) const
{
const char* border = "---------------------------------------------------";
output << border << std::endl;
output << "Frames used for calibration: " << std::max(mCalibData->objectPoints.size(), mCalibData->allCharucoCorners.size())
<< " \t RMS = " << mCalibData->totalAvgErr << std::endl;
if(mCalibData->cameraMatrix.at<double>(0,0) == mCalibData->cameraMatrix.at<double>(1,1))
output << "F = " <<
|
mCalibData->cameraMatrix.at<double>(1,1)
|
<< " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
else
output << "Fx = " << mCalibData->cameraMatrix.at<double>(0,0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(0) << " \t "
<< "Fy = " << mCalibData->cameraMatrix.at<double>(1,1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
output << "Cx = " << mCalibData->cameraMatrix.at<double>(0,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(2) << " \t"
<< "Cy = " << mCalibData->cameraMatrix.at<double>(1,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(3) << std::endl;
output << "K1 = " << mCalibData->distCoeffs.at<double>(0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(4) << std::endl;
output << "K2 = " << mCalibData->distCoeffs.at<double>(1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(5) << std::endl;
output << "K3 = " << mCalibData->distCoeffs.at<double>(4) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(8) << std::endl;
output << "TD1 = " << mCalibData->distCoeffs.at<double>(2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(6) << std::endl;
output << "TD2 = " << mCalibData->distCoeffs.at<double>(3) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(7) << std::endl;
}
void calib::calibDataController::updateUndistortMap()
{
cv::initUndistortRectifyMap(mCalibData->cameraMatrix, mCalibData->distCoeffs, cv::noArray(),
cv::getOptimalNewCameraMatrix(mCalibData->cameraMatrix, mCalibData->distCoeffs, mCalibData->imageSize, 0.0, mCalibData->imageSize),
mCalibData->imageSize, CV_16SC2, mCalibData->undistMap1, mCalibData->undistMap2);
}
|
ast_based
|
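The save routine above persists the calibration results through cv::FileStorage streaming. A minimal sketch of the same write/read round trip follows, assuming only the OpenCV core module; the file name and the placeholder matrices are illustrative.

#include <opencv2/core.hpp>

int main() {
    cv::Mat cameraMatrix = cv::Mat::eye(3, 3, CV_64F);   // placeholder intrinsics
    cv::Mat distCoeffs   = cv::Mat::zeros(1, 5, CV_64F); // placeholder distortion
    {
        // WRITE mode creates/overwrites the file; the format is chosen from the extension.
        cv::FileStorage writer("camera.yml", cv::FileStorage::WRITE);
        writer << "camera_matrix" << cameraMatrix;
        writer << "distortion_coefficients" << distCoeffs;
    } // released (and flushed) when it goes out of scope
    cv::Mat restoredCamera, restoredDist;
    cv::FileStorage reader("camera.yml", cv::FileStorage::READ);
    reader["camera_matrix"] >> restoredCamera;
    reader["distortion_coefficients"] >> restoredDist;
    return restoredCamera.empty() ? 1 : 0;
}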
<|fim_prefix|> root_ae->children.push_back(rid);
wd->update.insert(rid);
}
return root_rid;
}
bool AccessibilityDriverAccessKit::accessibility_has_element(const RID &p_id) const {
return rid_owner.owns(p_id);
}
void AccessibilityDriverAccessKit::_free_recursive(WindowData *p_wd, const RID &p_id) {
if (p_wd && p_wd->update.has(p_id)) {
p_wd->update.erase(p_id);
}
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
for (const RID &rid : ae->children) {
_free_recursive(p_wd, rid);
}
if (ae->node) {
accesskit_node_free(ae->node);
}
memdelete(ae);
rid_owner.free(p_id);
}
void AccessibilityDriverAccessKit::accessibility_free_element(const RID &p_id) {
ERR_FAIL_COND_MSG(in_accessibility_update, "Element can't be removed inside NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
if (ae) {
WindowData *wd = windows.getptr(ae->window_id);
AccessibilityElement *parent_ae = rid_owner.get_or_null(ae->parent);
if (parent_ae) {
parent_ae->children.erase(p_id);
}
_free_recursive(wd, p_id);
}
}
void AccessibilityDriverAccessKit::accessibility_element_set_meta(const RID &p_id, const Variant &p_meta) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
ae->meta = p_meta;
}
Variant AccessibilityDriverAccessKit::accessibility_element_get_meta(const RID &p_id) const {
const AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL_V(ae, Variant());
return ae->meta;
}
void AccessibilityDriverAccessKit::accessibility_update_set_focus(const RID &p_id) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
if (p_id.is_valid() && rid_owner.owns(p_id)) {
focus = p_id;
} else {<|fim_suffix|>
RID AccessibilityDriverAccessKit::accessibility_get_window_root(DisplayServer::WindowID p_window_id) const {
const WindowData *wd = windows.getptr(p_window_id);
ERR_FAIL_NULL_V(wd, RID());
return wd->root_id;
}
accesskit_tree_update *AccessibilityDriverAccessKit::_accessibility_build_tree_update(void *p_user_data) {
DisplayServer::WindowID window_id = (DisplayServer::WindowID)(size_t)p_user_data;
ERR_FAIL_COND_V(!singleton->windows.has(window_id), nullptr);
WindowData &wd = singleton->windows[window_id];
singleton->in_accessibility_update = true;
if (singleton->update_cb.is_valid()) {
singleton->update_cb.call(window_id);
}
singleton->in_accessibility_update = false;
AccessibilityElement *focus_ae = singleton->rid_owner.get_or_null(singleton->focus);
uint32_t update_size = wd.update.size();
accesskit_node_id ac_focus = (accesskit_node_id)wd.root_id.get_id();
if (focus_ae && focus_ae->window_id == window_id) {
ac_focus = (accesskit_node_id)singleton->focus.get_id();
}
accesskit_tree_update *tree_update = (update_size > 0) ? accesskit_tree_update_with_capacity_and_focus(update_size, ac_focus) : accesskit_tree_update_with_focus(ac_focus);
for (const RID &rid : wd.update) {
AccessibilityElement *ae = singleton->rid_owner.get_or_null(rid);
if (ae && ae->node) {
for (const RID &child_rid : ae->children) {
accesskit_node_push_child(ae->node, (accesskit_node_id)child_rid.get_id());
}
accesskit_tree_update_push_node(tree_update, (accesskit_node_id)rid.get_id(), ae->node);
ae->node = nullptr;
}
}
wd.update.clear();
return tree_update;
}
void AccessibilityDriverAccessKit::accessibility_update_if_active(const Callable &p_callable) {
ERR_FAIL_COND(!p_callable.is_valid());
update_cb = p_callable;
for (KeyValue<DisplayServer::WindowID, WindowData> &window : windows) {
#ifdef WINDOWS_ENABLED<|fim_middle|> focus = RID();
}
}
|
root_ae->children.push_back(rid);
wd->update.insert(rid);
}
return root_rid;
}
bool AccessibilityDriverAccessKit::accessibility_has_element(const RID &p_id) const {
return rid_owner.owns(p_id);
}
void AccessibilityDriverAccessKit::_free_recursive(WindowData *p_wd, const RID &p_id) {
if (p_wd && p_wd->update.has(p_id)) {
p_wd->update.erase(p_id);
}
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
for (const RID &rid : ae->children) {
_free_recursive(p_wd, rid);
}
if (ae->node) {
accesskit_node_free(ae->node);
}
memdelete(ae);
rid_owner.free(p_id);
}
void AccessibilityDriverAccessKit::accessibility_free_element(const RID &p_id) {
ERR_FAIL_COND_MSG(in_accessibility_update, "Element can't be removed inside NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
if (ae) {
WindowData *wd = windows.getptr(ae->window_id);
AccessibilityElement *parent_ae = rid_owner.get_or_null(ae->parent);
if (parent_ae) {
parent_ae->children.erase(p_id);
}
_free_recursive(wd, p_id);
}
}
void AccessibilityDriverAccessKit::accessibility_element_set_meta(const RID &p_id, const Variant &p_meta) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
ae->meta = p_meta;
}
Variant AccessibilityDriverAccessKit::accessibility_element_get_meta(const RID &p_id) const {
const AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL_V(ae, Variant());
return ae->meta;
}
void AccessibilityDriverAccessKit::accessibility_update_set_focus(const RID &p_id) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
if (p_id.is_valid() && rid_owner.owns(p_id)) {
focus = p_id;
} else {
|
focus = RID();
}
}
|
RID AccessibilityDriverAccessKit::accessibility_get_window_root(DisplayServer::WindowID p_window_id) const {
const WindowData *wd = windows.getptr(p_window_id);
ERR_FAIL_NULL_V(wd, RID());
return wd->root_id;
}
accesskit_tree_update *AccessibilityDriverAccessKit::_accessibility_build_tree_update(void *p_user_data) {
DisplayServer::WindowID window_id = (DisplayServer::WindowID)(size_t)p_user_data;
ERR_FAIL_COND_V(!singleton->windows.has(window_id), nullptr);
WindowData &wd = singleton->windows[window_id];
singleton->in_accessibility_update = true;
if (singleton->update_cb.is_valid()) {
singleton->update_cb.call(window_id);
}
singleton->in_accessibility_update = false;
AccessibilityElement *focus_ae = singleton->rid_owner.get_or_null(singleton->focus);
uint32_t update_size = wd.update.size();
accesskit_node_id ac_focus = (accesskit_node_id)wd.root_id.get_id();
if (focus_ae && focus_ae->window_id == window_id) {
ac_focus = (accesskit_node_id)singleton->focus.get_id();
}
accesskit_tree_update *tree_update = (update_size > 0) ? accesskit_tree_update_with_capacity_and_focus(update_size, ac_focus) : accesskit_tree_update_with_focus(ac_focus);
for (const RID &rid : wd.update) {
AccessibilityElement *ae = singleton->rid_owner.get_or_null(rid);
if (ae && ae->node) {
for (const RID &child_rid : ae->children) {
accesskit_node_push_child(ae->node, (accesskit_node_id)child_rid.get_id());
}
accesskit_tree_update_push_node(tree_update, (accesskit_node_id)rid.get_id(), ae->node);
ae->node = nullptr;
}
}
wd.update.clear();
return tree_update;
}
void AccessibilityDriverAccessKit::accessibility_update_if_active(const Callable &p_callable) {
ERR_FAIL_COND(!p_callable.is_valid());
update_cb = p_callable;
for (KeyValue<DisplayServer::WindowID, WindowData> &window : windows) {
#ifdef WINDOWS_ENABLED
|
random
|
<|fim_prefix|>/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/<|fim_suffix|> const uint64_t start_time = OS::get_singleton()->get_ticks_msec();
if (r_img->is_compressed()) {
return; // Do not compress, already compressed.
}
const Image::Format src_format = r_img->get_format();
const bool is_hdr = src_format >= Image::FORMAT_RF && src_format <= Image::FORMAT_RGBE9995;
if (src_format >= Image::FORMAT_RH && src_format <= Image::FORMAT_RGBAH) {
r_img->convert(Image::FORMAT_RGBAH);
} else if (src_format >= Image::FORMAT_RF && src_format <= Image::FORMAT_RGBE9995) {
r_img->convert(Image::FORMAT_RGBAF);
} else {
r_img->convert(Image::FORMAT_RGBA8);
}
// Determine encoder output format from our enum.
const astcenc_profile profile = is_hdr ? ASTCENC_PRF_HDR : ASTCENC_PRF_LDR;
Image::Format target_format = Image::FORMAT_MAX;
unsigned int block_x = 4;
unsigned int block_y = 4;
if (p_format == Image::ASTCFormat::ASTC_FORMAT_4x4) {
if (is_hdr) {
target_format = Image::FORMAT_ASTC_4x4_HDR;
} else {
target_format = Image::FORMAT_ASTC_4x4;
}
} else if (p_format == Image::ASTCFormat::ASTC_FORMAT_8x8) {
if (is_hdr) {
target_format = Image::FORMAT_ASTC_8x8_HDR;
} else {
target_format = Image::FORMAT_ASTC_8x8;
}
block_x = 8;
block_y = 8;
}
// Compress image data and (if required) mipmaps.
const bool has_mipmaps = r_img->has_mipmaps();
int width = r_img->get_width();
int height = r_img->get_height();
int required_width = (width % block_x) != 0 ? width + (block_x - (width % block_x)) : width;
int required_height = (height % block_y) != 0 ? height + (block_y - (height % block_y)) : height;
if (width != required_width || height != required_height) {
// Resize texture to fit block size.
r_img->resize(required_width, required_height);
width = required_width;
height = required_height;
}
print_verbose(vformat("astcenc: Encoding image size %dx%d to format %s%s.", width, height, Image::get_format_name(target_format), has_mipmaps ? ", with mipmaps" : ""));
// Initialize astcenc.<|fim_middle|>
#include "image_compress_astcenc.h"
#include "core/os/os.h"
#include "core/string/print_string.h"
#include <astcenc.h>
#ifdef TOOLS_ENABLED
void _compress_astc(Image *r_img, Image::ASTCFormat p_format) {
|
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
|
#include "image_compress_astcenc.h"
#include "core/os/os.h"
#include "core/string/print_string.h"
#include <astcenc.h>
#ifdef TOOLS_ENABLED
void _compress_astc(Image *r_img, Image::ASTCFormat p_format) {
|
const uint64_t start_time = OS::get_singleton()->get_ticks_msec();
if (r_img->is_compressed()) {
return; // Do not compress, already compressed.
}
const Image::Format src_format = r_img->get_format();
const bool is_hdr = src_format >= Image::FORMAT_RF && src_format <= Image::FORMAT_RGBE9995;
if (src_format >= Image::FORMAT_RH && src_format <= Image::FORMAT_RGBAH) {
r_img->convert(Image::FORMAT_RGBAH);
} else if (src_format >= Image::FORMAT_RF && src_format <= Image::FORMAT_RGBE9995) {
r_img->convert(Image::FORMAT_RGBAF);
} else {
r_img->convert(Image::FORMAT_RGBA8);
}
// Determine encoder output format from our enum.
const astcenc_profile profile = is_hdr ? ASTCENC_PRF_HDR : ASTCENC_PRF_LDR;
Image::Format target_format = Image::FORMAT_MAX;
unsigned int block_x = 4;
unsigned int block_y = 4;
if (p_format == Image::ASTCFormat::ASTC_FORMAT_4x4) {
if (is_hdr) {
target_format = Image::FORMAT_ASTC_4x4_HDR;
} else {
target_format = Image::FORMAT_ASTC_4x4;
}
} else if (p_format == Image::ASTCFormat::ASTC_FORMAT_8x8) {
if (is_hdr) {
target_format = Image::FORMAT_ASTC_8x8_HDR;
} else {
target_format = Image::FORMAT_ASTC_8x8;
}
block_x = 8;
block_y = 8;
}
// Compress image data and (if required) mipmaps.
const bool has_mipmaps = r_img->has_mipmaps();
int width = r_img->get_width();
int height = r_img->get_height();
int required_width = (width % block_x) != 0 ? width + (block_x - (width % block_x)) : width;
int required_height = (height % block_y) != 0 ? height + (block_y - (height % block_y)) : height;
if (width != required_width || height != required_height) {
// Resize texture to fit block size.
r_img->resize(required_width, required_height);
width = required_width;
height = required_height;
}
print_verbose(vformat("astcenc: Encoding image size %dx%d to format %s%s.", width, height, Image::get_format_name(target_format), has_mipmaps ? ", with mipmaps" : ""));
// Initialize astcenc.
|
random
|
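The encoder setup above resizes the image so that both dimensions are multiples of the ASTC block size (4x4 or 8x8). A standalone sketch of that rounding rule, reusing the same expression; the sample numbers are only an example.

#include <cstdio>

// Round a dimension up to the next multiple of the block size, as done above.
static int round_up_to_block(int size, int block) {
    return (size % block) != 0 ? size + (block - (size % block)) : size;
}

int main() {
    // A 300x201 source with 8x8 blocks is padded to 304x208 before encoding.
    printf("%d x %d\n", round_up_to_block(300, 8), round_up_to_block(201, 8));
    return 0;
}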
<|fim_prefix|>avgError;
mParamsStack.pop();
}
}
void calib::calibDataController::rememberCurrentParameters()
{
cv::Mat oldCameraMat, oldDistcoeefs, oldStdDevs;
mCalibData->cameraMatrix.copyTo(oldCameraMat);
mCalibData->distCoeffs.copyTo(oldDistcoeefs);
mCalibData->stdDeviations.copyTo(oldStdDevs);
mParamsStack.push(cameraParameters(oldCameraMat, oldDistcoeefs, oldStdDevs, mCalibData->totalAvgErr));
}
void calib::calibDataController::deleteAllData()
{
mCalibData->allFrames.clear();
mCalibData->imagePoints.clear();
mCalibData->objectPoints.clear();
mCalibData->allCharucoCorners.clear();
mCalibData->allCharucoIds.clear();
mCalibData->cameraMatrix = mCalibData->distCoeffs = cv::Mat();
mParamsStack = std::stack<cameraParameters>();
rememberCurrentParameters();
}
bool calib::calibDataController::saveCurrentCameraParameters() const
{
for(size_t i = 0; i < mCalibData->allFrames.size(); i++)
cv::imwrite(cv::format("calibration_%zu.png", i), mCalibData->allFrames[i]);
bool success = false;
if(mCalibData->cameraMatrix.total()) {
cv::FileStorage parametersWriter(mParamsFileName, cv::FileStorage::WRITE);
if(parametersWriter.isOpened()) {
time_t rawtime;
time(&rawtime);
char buf[256];
strftime(buf, sizeof(buf)-1, "%c", localtime(&rawtime));
parametersWriter << "calibrationDate" << buf;
parametersWriter << "framesCount" << std::max((int)mCalibData->objectPoints.size(), (int)mCalibData->allCharucoCorners.size());
parametersWriter << "cameraResolution" << mCalibData->imageSize;
parametersWriter << "camera_matrix" << mCalibData->cameraMatrix;
parametersWriter << "camera_matrix_std_dev" << mCalibData->stdDeviations.rowRange(cv::Range(0, 4));
parametersWriter << "distortion_coefficients" << mCalibData->distCoeffs;
<|fim_suffix|>
parametersWriter << "avg_reprojection_error" << mCalibData->totalAvgErr;
parametersWriter.release();
success = true;
}
}
return success;
}
void calib::calibDataController::printParametersToConsole(std::ostream &output) const
{
const char* border = "---------------------------------------------------";
output << border << std::endl;
output << "Frames used for calibration: " << std::max(mCalibData->objectPoints.size(), mCalibData->allCharucoCorners.size())
<< " \t RMS = " << mCalibData->totalAvgErr << std::endl;
if(mCalibData->cameraMatrix.at<double>(0,0) == mCalibData->cameraMatrix.at<double>(1,1))
output << "F = " << mCalibData->cameraMatrix.at<double>(1,1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
else
output << "Fx = " << mCalibData->cameraMatrix.at<double>(0,0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(0) << " \t "
<< "Fy = " << mCalibData->cameraMatrix.at<double>(1,1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
output << "Cx = " << mCalibData->cameraMatrix.at<double>(0,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(2) << " \t"
<< "Cy = " << mCalibData->cameraMatrix.at<double>(1,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(3) << std::endl;
output << "K1 = " << mCalibData->distCoeffs.at<double>(0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(4) << std::endl;
output << "K2 = " << mCalibData->distCoeffs.at<double>(1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(5) << std::endl;
output << "K3 = " << mCalibData->distCoeffs.at<double>(4) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(8) << std::endl;
output << "TD1 = " << mCalibData->distCoeffs.at<double>(2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(6) << std::endl;
output << "TD2<|fim_middle|>parametersWriter << "distortion_coefficients_std_dev" << mCalibData->stdDeviations.rowRange(cv::Range(4, 9));
|
avgError;
mParamsStack.pop();
}
}
void calib::calibDataController::rememberCurrentParameters()
{
cv::Mat oldCameraMat, oldDistcoeefs, oldStdDevs;
mCalibData->cameraMatrix.copyTo(oldCameraMat);
mCalibData->distCoeffs.copyTo(oldDistcoeefs);
mCalibData->stdDeviations.copyTo(oldStdDevs);
mParamsStack.push(cameraParameters(oldCameraMat, oldDistcoeefs, oldStdDevs, mCalibData->totalAvgErr));
}
void calib::calibDataController::deleteAllData()
{
mCalibData->allFrames.clear();
mCalibData->imagePoints.clear();
mCalibData->objectPoints.clear();
mCalibData->allCharucoCorners.clear();
mCalibData->allCharucoIds.clear();
mCalibData->cameraMatrix = mCalibData->distCoeffs = cv::Mat();
mParamsStack = std::stack<cameraParameters>();
rememberCurrentParameters();
}
bool calib::calibDataController::saveCurrentCameraParameters() const
{
for(size_t i = 0; i < mCalibData->allFrames.size(); i++)
cv::imwrite(cv::format("calibration_%zu.png", i), mCalibData->allFrames[i]);
bool success = false;
if(mCalibData->cameraMatrix.total()) {
cv::FileStorage parametersWriter(mParamsFileName, cv::FileStorage::WRITE);
if(parametersWriter.isOpened()) {
time_t rawtime;
time(&rawtime);
char buf[256];
strftime(buf, sizeof(buf)-1, "%c", localtime(&rawtime));
parametersWriter << "calibrationDate" << buf;
parametersWriter << "framesCount" << std::max((int)mCalibData->objectPoints.size(), (int)mCalibData->allCharucoCorners.size());
parametersWriter << "cameraResolution" << mCalibData->imageSize;
parametersWriter << "camera_matrix" << mCalibData->cameraMatrix;
parametersWriter << "camera_matrix_std_dev" << mCalibData->stdDeviations.rowRange(cv::Range(0, 4));
parametersWriter << "distortion_coefficients" << mCalibData->distCoeffs;
|
parametersWriter << "distortion_coefficients_std_dev" << mCalibData->stdDeviations.rowRange(cv::Range(4, 9));
|
parametersWriter << "avg_reprojection_error" << mCalibData->totalAvgErr;
parametersWriter.release();
success = true;
}
}
return success;
}
void calib::calibDataController::printParametersToConsole(std::ostream &output) const
{
const char* border = "---------------------------------------------------";
output << border << std::endl;
output << "Frames used for calibration: " << std::max(mCalibData->objectPoints.size(), mCalibData->allCharucoCorners.size())
<< " \t RMS = " << mCalibData->totalAvgErr << std::endl;
if(mCalibData->cameraMatrix.at<double>(0,0) == mCalibData->cameraMatrix.at<double>(1,1))
output << "F = " << mCalibData->cameraMatrix.at<double>(1,1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
else
output << "Fx = " << mCalibData->cameraMatrix.at<double>(0,0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(0) << " \t "
<< "Fy = " << mCalibData->cameraMatrix.at<double>(1,1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
output << "Cx = " << mCalibData->cameraMatrix.at<double>(0,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(2) << " \t"
<< "Cy = " << mCalibData->cameraMatrix.at<double>(1,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(3) << std::endl;
output << "K1 = " << mCalibData->distCoeffs.at<double>(0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(4) << std::endl;
output << "K2 = " << mCalibData->distCoeffs.at<double>(1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(5) << std::endl;
output << "K3 = " << mCalibData->distCoeffs.at<double>(4) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(8) << std::endl;
output << "TD1 = " << mCalibData->distCoeffs.at<double>(2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(6) << std::endl;
output << "TD2
|
ast_based
|
<|fim_prefix|>E->get().first, E->get().second));
}
// 4 - (undo) Remove inserted keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second);
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, newpos);
}
// 5 - (undo) Reinsert keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
Array key = animation->track_get_key_value(E->get().first, E->get().second);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
E->get().first,
oldpos,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
animation->bezier_track_get_key_handle_mode(E->get().first, E->get().second));
}
// 6 - (undo) Reinsert overlapped keys.
List<AnimMoveRestore>::ConstIterator restore_itr = to_restore.begin();
List<Animation::HandleMode>::ConstIterator handle_itr = to_restore_handle_modes.begin();
for (; restore_itr != to_restore.end() && handle_itr != to_restore_handle_modes.end(); ++restore_itr, ++handle_itr) {
const AnimMoveRestore &amr = *restore_itr;
Array key = amr.key;
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", amr.track, amr.time, amr.key, 1);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
amr.track,
amr.time,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
*handle_itr);
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// 7 - Reselect.
int i = 0;
for (SelectionSet::Element *E = <|fim_suffix|>; E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second);
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
undo_redo->add_do_method(this, "_select_at_anim", animation, E->get().first, newpos, i == 0);
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, oldpos, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
}
scaling_selection = false;
scaling_selection_scale = Vector2(1, 1);
scaling_selection_offset = Vector2();
queue_redraw();
}
Ref<InputEventMouseMotion> mm = p_event;
if (moving_selection_attempt && mm.is_valid()) {
Point2 new_pos = mm->get_position();
if (mm->is_alt_pressed()) { // Axis snap key move when alt is pressed
if (Math::abs(new_pos.x - moving_selection_mouse_begin.x) > Math::abs(new_pos.y - moving_selection_mouse_begin.y)) {
new_pos.y = moving_selection_mouse_begin.y;
} else {
new_pos.x = moving_selection_mouse_begin.x;
}
}
if (!moving_selection) {
moving_selection = true;
select_single_attempt = IntPair(-1, -1);
}
if (!read_only) {
float y = (get_size().height / 2.0 - new_pos.y) * timeline_v_zoom + timeline_v_scroll;
float moving_selection_begin_time = ((moving_selection_mouse_begin.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
float new_time = ((new_pos.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
float moving_selection_pivot = moving_selection_from_key != -1 ? animation->track_get_key_time(moving_selection_from_track, moving_selection_from_key) : 0;
float time_delta = new_time - moving<|fim_middle|>selection.back()
|
E->get().first, E->get().second));
}
// 4 - (undo) Remove inserted keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second);
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, newpos);
}
// 5 - (undo) Reinsert keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
Array key = animation->track_get_key_value(E->get().first, E->get().second);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
E->get().first,
oldpos,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
animation->bezier_track_get_key_handle_mode(E->get().first, E->get().second));
}
// 6 - (undo) Reinsert overlapped keys.
List<AnimMoveRestore>::ConstIterator restore_itr = to_restore.begin();
List<Animation::HandleMode>::ConstIterator handle_itr = to_restore_handle_modes.begin();
for (; restore_itr != to_restore.end() && handle_itr != to_restore_handle_modes.end(); ++restore_itr, ++handle_itr) {
const AnimMoveRestore &amr = *restore_itr;
Array key = amr.key;
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", amr.track, amr.time, amr.key, 1);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
amr.track,
amr.time,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
*handle_itr);
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// 7 - Reselect.
int i = 0;
for (SelectionSet::Element *E =
|
selection.back()
|
; E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second);
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
undo_redo->add_do_method(this, "_select_at_anim", animation, E->get().first, newpos, i == 0);
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, oldpos, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
}
scaling_selection = false;
scaling_selection_scale = Vector2(1, 1);
scaling_selection_offset = Vector2();
queue_redraw();
}
Ref<InputEventMouseMotion> mm = p_event;
if (moving_selection_attempt && mm.is_valid()) {
Point2 new_pos = mm->get_position();
if (mm->is_alt_pressed()) { // Axis snap key move when alt is pressed
if (Math::abs(new_pos.x - moving_selection_mouse_begin.x) > Math::abs(new_pos.y - moving_selection_mouse_begin.y)) {
new_pos.y = moving_selection_mouse_begin.y;
} else {
new_pos.x = moving_selection_mouse_begin.x;
}
}
if (!moving_selection) {
moving_selection = true;
select_single_attempt = IntPair(-1, -1);
}
if (!read_only) {
float y = (get_size().height / 2.0 - new_pos.y) * timeline_v_zoom + timeline_v_scroll;
float moving_selection_begin_time = ((moving_selection_mouse_begin.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
float new_time = ((new_pos.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
float moving_selection_pivot = moving_selection_from_key != -1 ? animation->track_get_key_time(moving_selection_from_track, moving_selection_from_key) : 0;
float time_delta = new_time - moving
|
ast_based
|
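The scaling and undo logic above repeatedly applies newpos += -offset + (newpos - pivot) * (scale - 1) to key times. The small self-contained check below verifies that this increment equals scaling about the pivot and then shifting by -offset; the sample values are arbitrary.

#include <cassert>
#include <cmath>

int main() {
    const float pivot = 2.0f, scale = 1.5f, offset = 0.25f;
    const float t = 3.0f; // original key time
    // Incremental form used in the editor code above.
    const float incremental = t + (-offset + (t - pivot) * (scale - 1.0f));
    // Equivalent closed form: scale about the pivot, then shift by -offset.
    const float direct = pivot + (t - pivot) * scale - offset;
    assert(std::fabs(incremental - direct) < 1e-6f);
    return 0;
}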
<|fim_prefix|> return vflip;
}
void AnimatedSprite2D::_res_changed() {
set_frame_and_progress(frame, frame_progress);
queue_redraw();
notify_property_list_changed();
}
bool AnimatedSprite2D::is_playing() const {
return playing;
}
void AnimatedSprite2D::set_autoplay(const String &p_name) {
if (is_inside_tree() && !Engine::get_singleton()->is_editor_hint()) {
WARN_PRINT("Setting autoplay after the node has been added to the scene has no effect.");
}
autoplay = p_name;
}
String AnimatedSprite2D::get_autoplay() const {
return autoplay;
}
void AnimatedSprite2D::play(const StringName &p_name, float p_custom_scale, bool p_from_end) {
StringName name = p_name;
if (name == StringName()) {
name = animation;
}
ERR_FAIL_COND_MSG(frames.is_null(), vformat("There is no animation with name '%s'.", name));
ERR_FAIL_COND_MSG(!frames->get_animation_names().has(name), vformat("There is no animation with name '%s'.", name));
if (frames->get_frame_count(name) == 0) {
return;
}
playing = true;
custom_speed_scale = p_custom_scale;
if (name != animation) {
animation = name;
int end_frame = MAX(0, frames->get_frame_count(animation) - 1);
if (p_from_end) {
set_frame_and_progress(end_frame, 1.0);
} else {
set_frame_and_progress(0, 0.0);
}
emit_signal(SceneStringName(animation_changed));
} else {
int end_frame = MAX(0, frames->get_frame_count(animation) - 1);
bool is_backward = std::signbit(speed_scale * custom_speed_scale);
if (p_from_end && is_backward && frame == 0 && frame_progress <= 0.0) {
set_frame_and_progress(end_frame, 1.0);
} else if (!p_from_end && !is_backward && frame == end_frame && frame_progress >= 1.0) {
set_frame_and_progress(0, 0.0);
}
}
set_process_internal(true);
notify_property_list_changed();
queue_redraw();
}
void AnimatedSprite2D::play_backwards(const StringName &p_name) {
play(p_name, -1, true);
}
void AnimatedSprite2D::_stop_internal(bool p_reset) {
playing = false;<|fim_suffix|> _stop_internal(false);
}
void AnimatedSprite2D::stop() {
_stop_internal(true);
}
double AnimatedSprite2D::_get_frame_duration() {
if (frames.is_valid() && frames->has_animation(animation)) {
return frames->get_frame_duration(animation, frame);
}
return 1.0;
}
void AnimatedSprite2D::_calc_frame_speed_scale() {
frame_speed_scale = 1.0 / _get_frame_duration();
}
void AnimatedSprite2D::set_animation(const StringName &p_name) {
if (animation == p_name) {
return;
}
animation = p_name;
emit_signal(SceneStringName(animation_changed));
if (frames.is_null()) {
animation = StringName();
stop();
ERR_FAIL_MSG(vformat("There is no animation with name '%s'.", p_name));
}
int frame_count = frames->get_frame_count(animation);
if (animation == StringName() || frame_count == 0) {
stop();
return;
} else if (!frames->get_animation_names().has(animation)) {
animation = StringName();
stop();
ERR_FAIL_MSG(vformat("There is no animation with name '%s'.", p_name));
}
if (std::signbit(get_playing_speed())) {
set_frame_and_progress(frame_count - 1, 1.0);
} else {
set_frame_and_progress(0, 0.0);
}
notify_property_list_changed();
queue_redraw();
}
StringName AnimatedSprite2D::get_animation() const {
return animation;
}
PackedStringArray AnimatedSprite2D::get_configuration_warnings() const {
PackedStringArray warnings = Node2D::get_configuration_warnings();
if (frames.is_null()) {
warnings.push_back(RTR("A SpriteFrames resource must be created or set in the \"Sprite Frames\" property in order for AnimatedSprite2D to display frames."));
}
return warnings;
}
#ifdef TOOLS_ENABLED
void AnimatedSprite2D::get_argument_options(const StringName &p_function, int p_idx, List<String> *r_options) const {
const String pf = p_function;
if (p_idx == 0 && frames.is_valid()) {
if (pf == "play" || pf == "play_backwards" || pf == "set_animation" || pf == "set_autoplay") {
List<StringName> al;
frames->get_animation_list(&al);<|fim_middle|> if (p_reset) {
custom_speed_scale = 1.0;
set_frame_and_progress(0, 0.0);
}
notify_property_list_changed();
set_process_internal(false);
}
void AnimatedSprite2D::pause() {
|
return vflip;
}
void AnimatedSprite2D::_res_changed() {
set_frame_and_progress(frame, frame_progress);
queue_redraw();
notify_property_list_changed();
}
bool AnimatedSprite2D::is_playing() const {
return playing;
}
void AnimatedSprite2D::set_autoplay(const String &p_name) {
if (is_inside_tree() && !Engine::get_singleton()->is_editor_hint()) {
WARN_PRINT("Setting autoplay after the node has been added to the scene has no effect.");
}
autoplay = p_name;
}
String AnimatedSprite2D::get_autoplay() const {
return autoplay;
}
void AnimatedSprite2D::play(const StringName &p_name, float p_custom_scale, bool p_from_end) {
StringName name = p_name;
if (name == StringName()) {
name = animation;
}
ERR_FAIL_COND_MSG(frames.is_null(), vformat("There is no animation with name '%s'.", name));
ERR_FAIL_COND_MSG(!frames->get_animation_names().has(name), vformat("There is no animation with name '%s'.", name));
if (frames->get_frame_count(name) == 0) {
return;
}
playing = true;
custom_speed_scale = p_custom_scale;
if (name != animation) {
animation = name;
int end_frame = MAX(0, frames->get_frame_count(animation) - 1);
if (p_from_end) {
set_frame_and_progress(end_frame, 1.0);
} else {
set_frame_and_progress(0, 0.0);
}
emit_signal(SceneStringName(animation_changed));
} else {
int end_frame = MAX(0, frames->get_frame_count(animation) - 1);
bool is_backward = std::signbit(speed_scale * custom_speed_scale);
if (p_from_end && is_backward && frame == 0 && frame_progress <= 0.0) {
set_frame_and_progress(end_frame, 1.0);
} else if (!p_from_end && !is_backward && frame == end_frame && frame_progress >= 1.0) {
set_frame_and_progress(0, 0.0);
}
}
set_process_internal(true);
notify_property_list_changed();
queue_redraw();
}
void AnimatedSprite2D::play_backwards(const StringName &p_name) {
play(p_name, -1, true);
}
void AnimatedSprite2D::_stop_internal(bool p_reset) {
playing = false;
|
if (p_reset) {
custom_speed_scale = 1.0;
set_frame_and_progress(0, 0.0);
}
notify_property_list_changed();
set_process_internal(false);
}
void AnimatedSprite2D::pause() {
|
_stop_internal(false);
}
void AnimatedSprite2D::stop() {
_stop_internal(true);
}
double AnimatedSprite2D::_get_frame_duration() {
if (frames.is_valid() && frames->has_animation(animation)) {
return frames->get_frame_duration(animation, frame);
}
return 1.0;
}
void AnimatedSprite2D::_calc_frame_speed_scale() {
frame_speed_scale = 1.0 / _get_frame_duration();
}
void AnimatedSprite2D::set_animation(const StringName &p_name) {
if (animation == p_name) {
return;
}
animation = p_name;
emit_signal(SceneStringName(animation_changed));
if (frames.is_null()) {
animation = StringName();
stop();
ERR_FAIL_MSG(vformat("There is no animation with name '%s'.", p_name));
}
int frame_count = frames->get_frame_count(animation);
if (animation == StringName() || frame_count == 0) {
stop();
return;
} else if (!frames->get_animation_names().has(animation)) {
animation = StringName();
stop();
ERR_FAIL_MSG(vformat("There is no animation with name '%s'.", p_name));
}
if (std::signbit(get_playing_speed())) {
set_frame_and_progress(frame_count - 1, 1.0);
} else {
set_frame_and_progress(0, 0.0);
}
notify_property_list_changed();
queue_redraw();
}
StringName AnimatedSprite2D::get_animation() const {
return animation;
}
PackedStringArray AnimatedSprite2D::get_configuration_warnings() const {
PackedStringArray warnings = Node2D::get_configuration_warnings();
if (frames.is_null()) {
warnings.push_back(RTR("A SpriteFrames resource must be created or set in the \"Sprite Frames\" property in order for AnimatedSprite2D to display frames."));
}
return warnings;
}
#ifdef TOOLS_ENABLED
void AnimatedSprite2D::get_argument_options(const StringName &p_function, int p_idx, List<String> *r_options) const {
const String pf = p_function;
if (p_idx == 0 && frames.is_valid()) {
if (pf == "play" || pf == "play_backwards" || pf == "set_animation" || pf == "set_autoplay") {
List<StringName> al;
frames->get_animation_list(&al);
|
random
|
<|fim_prefix|> grpc::internal::Call* call) {
ops->FillOps(
call); // Make a copy of call. It's fine since Call just has pointers
}
void* Channel::RegisterMethod(const char* method) {
return grpc_channel_register_call(
c_channel_, method, host_.empty() ? nullptr : host_.c_str(), nullptr);
}
grpc_connectivity_state Channel::GetState(bool try_to_connect) {
return grpc_channel_check_connectivity_state(c_channel_, try_to_connect);
}
namespace {
class TagSaver final : public grpc::internal::CompletionQueueTag {
public:
explicit TagSaver(void* tag) : tag_(tag) {}
~TagSaver() override {}
bool FinalizeResult(void** tag, bool* /*status*/) override {
*tag = tag_;
delete this;
return true;
}
private:
void* tag_;
};
} // namespace
void Channel::NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
grpc::CompletionQueue* cq, void* tag) {
TagSaver* tag_saver = new TagSaver(tag);
grpc_channel_watch_connectivity_state(c_channel_, last_observed, deadline,
cq->cq(), tag_saver);
}
bool Channel::WaitForStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline) {
grpc::CompletionQueue cq;
bool ok = false;
void* tag = nullptr;
NotifyOnStateChangeImpl(last_observed, deadline, &cq, nullptr);
cq.Next(&tag, &ok);
GRPC_CHECK_EQ(tag, nullptr);
return ok;
}
namespace {
class ShutdownCallback : public grpc_completion_queue_functor {
public:
ShutdownCallback() {
functor_run = &ShutdownCallback::Run;
// Set inlineable to true since this callback is trivial and thus does not
// need to be run from the EventEngine (potentially triggering a thread
// hop). This should only be used by internal callbacks like this and not by
// user application code.
inlineable = true;<|fim_suffix|> void TakeCQ(grpc::CompletionQueue* cq) { cq_ = cq; }
// The Run function will get invoked by the completion queue library
// when the shutdown is actually complete
static void Run(grpc_completion_queue_functor* cb, int) {
auto* callback = static_cast<ShutdownCallback*>(cb);
delete callback->cq_;
delete callback;
}
private:
grpc::CompletionQueue* cq_ = nullptr;
};
} // namespace
::grpc::CompletionQueue* Channel::CallbackCQ() {
// TODO(vjpai): Consider using a single global CQ for the default CQ
// if there is no explicit per-channel CQ registered
CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_acquire);
if (callback_cq != nullptr) {
return callback_cq;
}
// The callback_cq_ wasn't already set, so grab a lock and set it up exactly
// once for this channel.
grpc::internal::MutexLock l(&mu_);
callback_cq = callback_cq_.load(std::memory_order_relaxed);
if (callback_cq == nullptr) {
if (grpc_iomgr_run_in_background()) {
// gRPC-core provides the backing needed for the preferred CQ type
auto* shutdown_callback = new ShutdownCallback;
callback_cq = new grpc::CompletionQueue(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
shutdown_callback});
// Transfer ownership of the new cq to its own shutdown callback
shutdown_callback->TakeCQ(callback_cq);
} else {
// Otherwise we need to use the alternative CQ variant
callback_cq = CompletionQueue::CallbackAlternativeCQ();
}
callback_cq_.store(callback_cq, std::memory_order_release);
}
return callback_cq;
}
} // namespace grpc
<|fim_middle|> }
// TakeCQ takes ownership of the cq into the shutdown callback
// so that the shutdown callback will be responsible for destroying it
|
grpc::internal::Call* call) {
ops->FillOps(
call); // Make a copy of call. It's fine since Call just has pointers
}
void* Channel::RegisterMethod(const char* method) {
return grpc_channel_register_call(
c_channel_, method, host_.empty() ? nullptr : host_.c_str(), nullptr);
}
grpc_connectivity_state Channel::GetState(bool try_to_connect) {
return grpc_channel_check_connectivity_state(c_channel_, try_to_connect);
}
namespace {
class TagSaver final : public grpc::internal::CompletionQueueTag {
public:
explicit TagSaver(void* tag) : tag_(tag) {}
~TagSaver() override {}
bool FinalizeResult(void** tag, bool* /*status*/) override {
*tag = tag_;
delete this;
return true;
}
private:
void* tag_;
};
} // namespace
void Channel::NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
grpc::CompletionQueue* cq, void* tag) {
TagSaver* tag_saver = new TagSaver(tag);
grpc_channel_watch_connectivity_state(c_channel_, last_observed, deadline,
cq->cq(), tag_saver);
}
bool Channel::WaitForStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline) {
grpc::CompletionQueue cq;
bool ok = false;
void* tag = nullptr;
NotifyOnStateChangeImpl(last_observed, deadline, &cq, nullptr);
cq.Next(&tag, &ok);
GRPC_CHECK_EQ(tag, nullptr);
return ok;
}
namespace {
class ShutdownCallback : public grpc_completion_queue_functor {
public:
ShutdownCallback() {
functor_run = &ShutdownCallback::Run;
// Set inlineable to true since this callback is trivial and thus does not
// need to be run from the EventEngine (potentially triggering a thread
// hop). This should only be used by internal callbacks like this and not by
// user application code.
inlineable = true;
|
}
// TakeCQ takes ownership of the cq into the shutdown callback
// so that the shutdown callback will be responsible for destroying it
|
void TakeCQ(grpc::CompletionQueue* cq) { cq_ = cq; }
// The Run function will get invoked by the completion queue library
// when the shutdown is actually complete
static void Run(grpc_completion_queue_functor* cb, int) {
auto* callback = static_cast<ShutdownCallback*>(cb);
delete callback->cq_;
delete callback;
}
private:
grpc::CompletionQueue* cq_ = nullptr;
};
} // namespace
::grpc::CompletionQueue* Channel::CallbackCQ() {
// TODO(vjpai): Consider using a single global CQ for the default CQ
// if there is no explicit per-channel CQ registered
CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_acquire);
if (callback_cq != nullptr) {
return callback_cq;
}
// The callback_cq_ wasn't already set, so grab a lock and set it up exactly
// once for this channel.
grpc::internal::MutexLock l(&mu_);
callback_cq = callback_cq_.load(std::memory_order_relaxed);
if (callback_cq == nullptr) {
if (grpc_iomgr_run_in_background()) {
// gRPC-core provides the backing needed for the preferred CQ type
auto* shutdown_callback = new ShutdownCallback;
callback_cq = new grpc::CompletionQueue(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
shutdown_callback});
// Transfer ownership of the new cq to its own shutdown callback
shutdown_callback->TakeCQ(callback_cq);
} else {
// Otherwise we need to use the alternative CQ variant
callback_cq = CompletionQueue::CallbackAlternativeCQ();
}
callback_cq_.store(callback_cq, std::memory_order_release);
}
return callback_cq;
}
} // namespace grpc
|
random
|
<|fim_prefix|>ceAgent* agent);
namespace tfw_internal {
#if defined(TF_ENABLE_ACTIVITY_WATCHER)
// Records an activity start without checking whether the watcher is enabled.
ActivityId RecordActivityStart(std::unique_ptr<Activity> activity);
// Records an activity end without checking whether the activity_id is valid.
void RecordActivityEnd(ActivityId activity_id);
TF_EXPORT extern std::atomic<int> g_watcher_level;
// Returns whether the activity watcher is enabled.
inline bool WatcherEnabled(int level = 1) {
return g_watcher_level.load(std::memory_order_acquire) >= level;
}
#endif
// NOTE: Borrowed from boost C++ libraries because std::is_invocable_r is not
// available in Android NDK.
template <typename R, typename F, typename... Args>
struct is_invocable_r
: std::is_constructible<
std::function<R(Args...)>,
std::reference_wrapper<typename std::remove_reference<F>::type>> {};
} // namespace tfw_internal
template <typename F>
constexpr bool is_activity_generator =
tfw_internal::is_invocable_r<std::unique_ptr<Activity>, F>::value;
// Records an activity explicitly. Useful when the start and end of an activity
// happen in different threads. Generates the Activity only if activity
// watching is enabled, useful for avoiding expensive operations when activity
// watching is disabled.
// Example Usage:
// auto aid = ActivityStart([&]() {
// return std::make_unique<Activity>(
// op_name, category,
// Activity::Attributes{{"key1", value1}, {"key2", value2}});
// }, /*level=*/2);
// DoSomething();
// ActivityEnd(aid);
template <
typename ActivityGenerator,
std::enable_if_t<is_activity_generator<ActivityGenerator>, bool> = true>
inline ActivityId ActivityStart(ActivityGenerator&& gen, int level = 1) {
#if defined(TF_ENABLE_ACTIVITY_WATCHER)
if (TF_PREDICT_FALSE(tfw_internal::WatcherEnabled(level))) {
return tfw_internal::RecordActivityStart(
std::forward<ActivityGenerator>(gen)());
}
#endif
<|fim_suffix|>
}
inline void ActivityEnd(ActivityId id) {
#if defined(TF_ENABLE_ACTIVITY_WATCHER)
if (TF_PREDICT_FALSE(id != kActivityNotRecorded)) {
tfw_internal::RecordActivityEnd(id);
}
#endif
}
// ActivityScope marks a scope as an activity and record it with a global
// ActivityRecorder.
// Example Usage:
// {
// ActivityScope activity_scope([&]() {
// return std::make_unique<Activity>(
// op_name, ActivityCategory::kMisc,
// Activity::Attributes{{"key1", value1}, {"key2", value2}});
// }, /*level=*/2);
// DoSomething();
// }
class ActivityScope {
public:
template <
typename ActivityGenerator,
std::enable_if_t<is_activity_generator<ActivityGenerator>, bool> = true>
explicit ActivityScope(ActivityGenerator&& gen, int level = 1) {
activity_id_ = ActivityStart(std::forward<ActivityGenerator>(gen), level);
}
ActivityScope(ActivityScope&& activity) {
activity_id_ = activity.activity_id_;
activity.activity_id_ = kActivityNotRecorded;
}
~ActivityScope() { ActivityEnd(activity_id_); }
private:
ActivityId activity_id_;
ActivityScope(const ActivityScope&) = delete;
void operator=(const ActivityScope&) = delete;
};
} // namespace activity_watcher
} // namespace tensorflow
#endif // TENSORFLOW_CORE_ACTIVITY_WATCHER_ACTIVITY_H_
<|fim_middle|>return kActivityNotRecorded;
|
ceAgent* agent);
namespace tfw_internal {
#if defined(TF_ENABLE_ACTIVITY_WATCHER)
// Records an activity start without checking whether the watcher is enabled.
ActivityId RecordActivityStart(std::unique_ptr<Activity> activity);
// Records an activity end without checking whether the activity_id is valid.
void RecordActivityEnd(ActivityId activity_id);
TF_EXPORT extern std::atomic<int> g_watcher_level;
// Returns whether the activity watcher is enabled.
inline bool WatcherEnabled(int level = 1) {
return g_watcher_level.load(std::memory_order_acquire) >= level;
}
#endif
// NOTE: Borrowed from boost C++ libraries because std::is_invocable_r is not
// available in Android NDK.
template <typename R, typename F, typename... Args>
struct is_invocable_r
: std::is_constructible<
std::function<R(Args...)>,
std::reference_wrapper<typename std::remove_reference<F>::type>> {};
} // namespace tfw_internal
template <typename F>
constexpr bool is_activity_generator =
tfw_internal::is_invocable_r<std::unique_ptr<Activity>, F>::value;
// Records an activity explicitly. Useful when the start and end of an activity
// happen in different threads. Generates the Activity only if activity
// watching is enabled, useful for avoiding expensive operations when activity
// watching is disabled.
// Example Usage:
// auto aid = ActivityStart([&]() {
// return std::make_unique<Activity>(
// op_name, category,
// Activity::Attributes{{"key1", value1}, {"key2", value2}});
// }, /*level=*/2);
// DoSomething();
// ActivityEnd(aid);
template <
typename ActivityGenerator,
std::enable_if_t<is_activity_generator<ActivityGenerator>, bool> = true>
inline ActivityId ActivityStart(ActivityGenerator&& gen, int level = 1) {
#if defined(TF_ENABLE_ACTIVITY_WATCHER)
if (TF_PREDICT_FALSE(tfw_internal::WatcherEnabled(level))) {
return tfw_internal::RecordActivityStart(
std::forward<ActivityGenerator>(gen)());
}
#endif
|
return kActivityNotRecorded;
|
}
inline void ActivityEnd(ActivityId id) {
#if defined(TF_ENABLE_ACTIVITY_WATCHER)
if (TF_PREDICT_FALSE(id != kActivityNotRecorded)) {
tfw_internal::RecordActivityEnd(id);
}
#endif
}
// ActivityScope marks a scope as an activity and record it with a global
// ActivityRecorder.
// Example Usage:
// {
// ActivityScope activity_scope([&]() {
// return std::make_unique<Activity>(
// op_name, ActivityCategory::kMisc,
// Activity::Attributes{{"key1", value1}, {"key2", value2}});
// }, /*level=*/2);
// DoSomething();
// }
class ActivityScope {
public:
template <
typename ActivityGenerator,
std::enable_if_t<is_activity_generator<ActivityGenerator>, bool> = true>
explicit ActivityScope(ActivityGenerator&& gen, int level = 1) {
activity_id_ = ActivityStart(std::forward<ActivityGenerator>(gen), level);
}
ActivityScope(ActivityScope&& activity) {
activity_id_ = activity.activity_id_;
activity.activity_id_ = kActivityNotRecorded;
}
~ActivityScope() { ActivityEnd(activity_id_); }
private:
ActivityId activity_id_;
ActivityScope(const ActivityScope&) = delete;
void operator=(const ActivityScope&) = delete;
};
} // namespace activity_watcher
} // namespace tensorflow
#endif // TENSORFLOW_CORE_ACTIVITY_WATCHER_ACTIVITY_H_
|
ast_based
|
<|fim_prefix|> ev->set_tilt(p_points[idx].tilt);
Input::get_singleton()->parse_input_event(ev);
touch.write[i].pos = p_points[idx].pos;
}
} break;
case AMOTION_EVENT_ACTION_CANCEL: {
_cancel_all_touch();
} break;
case AMOTION_EVENT_ACTION_UP: { //release
_release_all_touch();
} break;
case AMOTION_EVENT_ACTION_POINTER_DOWN: { // add touch
for (int i = 0; i < p_points.size(); i++) {
if (p_points[i].id == p_pointer) {
TouchPos tp = p_points[i];
touch.push_back(tp);
Ref<InputEventScreenTouch> ev;
ev.instantiate();
ev->set_index(tp.id);
ev->set_pressed(true);
ev->set_position(tp.pos);
Input::get_singleton()->parse_input_event(ev);
break;
}
}
} break;
case AMOTION_EVENT_ACTION_POINTER_UP: { // remove touch
for (int i = 0; i < touch.size(); i++) {
if (touch[i].id == p_pointer) {
Ref<InputEventScreenTouch> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_pressed(false);
ev->set_position(touch[i].pos);
Input::get_singleton()->parse_input_event(ev);
touch.remove_at(i);
break;
}
}
} break;
}
}
void AndroidInputHandler::_cancel_mouse_event_info(bool p_source_mouse_relative) {
buttons_state = BitField<MouseButtonMask>();
_parse_mouse_event_info(BitField<MouseButtonMask>(), false, true, false, p_source_mouse_relative);
mouse_event_info.valid = false;
}
void AndroidInputHandler::_parse_mouse_event_info(BitField<MouseButtonMask> event_buttons_mask, bool p_pressed, bool p_canceled, bool p_double_click, bool p_source_mouse_relative) {
if (!mouse_event_info.valid) {
return;
}
Ref<InputEventMouseButton> ev;
ev.instantiate();
_set_key_modifier_state(ev, Key::NONE);
if (p_source_mouse_relative) {
ev->set_position(hover_prev_pos);
ev->set_global_position(hover_prev_pos);
} else {
ev->set_position(mouse_event_info.pos);
ev->set_global_position(mouse_event_info.pos);
hover_prev_pos = mouse_event_info.pos;<|fim_suffix|> buttons_state = event_buttons_mask;
ev->set_button_index(_button_index_from_mask(changed_button_mask));
ev->set_button_mask(event_buttons_mask);
ev->set_double_click(p_double_click);
Input::get_singleton()->parse_input_event(ev);
}
void AndroidInputHandler::_release_mouse_event_info(bool p_source_mouse_relative) {
_parse_mouse_event_info(BitField<MouseButtonMask>(), false, false, false, p_source_mouse_relative);
mouse_event_info.valid = false;
}
void AndroidInputHandler::process_mouse_event(int p_event_action, int p_event_android_buttons_mask, Point2 p_event_pos, Vector2 p_delta, bool p_double_click, bool p_source_mouse_relative, float p_pressure, Vector2 p_tilt) {
BitField<MouseButtonMask> event_buttons_mask = _android_button_mask_to_godot_button_mask(p_event_android_buttons_mask);
switch (p_event_action) {
case AMOTION_EVENT_ACTION_HOVER_MOVE: // hover move
case AMOTION_EVENT_ACTION_HOVER_ENTER: // hover enter
case AMOTION_EVENT_ACTION_HOVER_EXIT: { // hover exit
// https://developer.android.com/reference/android/view/MotionEvent.html#ACTION_HOVER_ENTER
Ref<InputEventMouseMotion> ev;
ev.instantiate();
_set_key_modifier_state(ev, Key::NONE);
ev->set_position(p_event_pos);
ev->set_global_position(p_event_pos);
ev->set_relative(p_event_pos - hover_prev_pos);
ev->set_relative_screen_position(ev->get_relative());
Input::get_singleton()->parse_input_event(ev);
hover_prev_pos = p_event_pos;
} break;
case AMOTION_EVENT_ACTION_DOWN:
case AMOTION_EVENT_ACTION_BUTTON_PRESS: {
// Release any remaining touches or mouse event
_release_mouse_event_info();
_release_all_touch();
mouse_event_info.valid = true;
mouse_event_info.pos = p_event_pos;
_parse_mouse_event_info(event_buttons_mask, true, false, p_double_click, p_source_mouse_relative);
} break;
case AMOTION_EVENT_ACTION_CANCEL: {
_cancel_mouse_event_info(p_source_mouse_relative);
} break;
case AMOTION_EVENT_ACTION_UP:<|fim_middle|> }
ev->set_pressed(p_pressed);
ev->set_canceled(p_canceled);
BitField<MouseButtonMask> changed_button_mask = buttons_state.get_different(event_buttons_mask);
|
ev->set_tilt(p_points[idx].tilt);
Input::get_singleton()->parse_input_event(ev);
touch.write[i].pos = p_points[idx].pos;
}
} break;
case AMOTION_EVENT_ACTION_CANCEL: {
_cancel_all_touch();
} break;
case AMOTION_EVENT_ACTION_UP: { //release
_release_all_touch();
} break;
case AMOTION_EVENT_ACTION_POINTER_DOWN: { // add touch
for (int i = 0; i < p_points.size(); i++) {
if (p_points[i].id == p_pointer) {
TouchPos tp = p_points[i];
touch.push_back(tp);
Ref<InputEventScreenTouch> ev;
ev.instantiate();
ev->set_index(tp.id);
ev->set_pressed(true);
ev->set_position(tp.pos);
Input::get_singleton()->parse_input_event(ev);
break;
}
}
} break;
case AMOTION_EVENT_ACTION_POINTER_UP: { // remove touch
for (int i = 0; i < touch.size(); i++) {
if (touch[i].id == p_pointer) {
Ref<InputEventScreenTouch> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_pressed(false);
ev->set_position(touch[i].pos);
Input::get_singleton()->parse_input_event(ev);
touch.remove_at(i);
break;
}
}
} break;
}
}
void AndroidInputHandler::_cancel_mouse_event_info(bool p_source_mouse_relative) {
buttons_state = BitField<MouseButtonMask>();
_parse_mouse_event_info(BitField<MouseButtonMask>(), false, true, false, p_source_mouse_relative);
mouse_event_info.valid = false;
}
void AndroidInputHandler::_parse_mouse_event_info(BitField<MouseButtonMask> event_buttons_mask, bool p_pressed, bool p_canceled, bool p_double_click, bool p_source_mouse_relative) {
if (!mouse_event_info.valid) {
return;
}
Ref<InputEventMouseButton> ev;
ev.instantiate();
_set_key_modifier_state(ev, Key::NONE);
if (p_source_mouse_relative) {
ev->set_position(hover_prev_pos);
ev->set_global_position(hover_prev_pos);
} else {
ev->set_position(mouse_event_info.pos);
ev->set_global_position(mouse_event_info.pos);
hover_prev_pos = mouse_event_info.pos;
|
}
ev->set_pressed(p_pressed);
ev->set_canceled(p_canceled);
BitField<MouseButtonMask> changed_button_mask = buttons_state.get_different(event_buttons_mask);
|
buttons_state = event_buttons_mask;
ev->set_button_index(_button_index_from_mask(changed_button_mask));
ev->set_button_mask(event_buttons_mask);
ev->set_double_click(p_double_click);
Input::get_singleton()->parse_input_event(ev);
}
void AndroidInputHandler::_release_mouse_event_info(bool p_source_mouse_relative) {
_parse_mouse_event_info(BitField<MouseButtonMask>(), false, false, false, p_source_mouse_relative);
mouse_event_info.valid = false;
}
void AndroidInputHandler::process_mouse_event(int p_event_action, int p_event_android_buttons_mask, Point2 p_event_pos, Vector2 p_delta, bool p_double_click, bool p_source_mouse_relative, float p_pressure, Vector2 p_tilt) {
BitField<MouseButtonMask> event_buttons_mask = _android_button_mask_to_godot_button_mask(p_event_android_buttons_mask);
switch (p_event_action) {
case AMOTION_EVENT_ACTION_HOVER_MOVE: // hover move
case AMOTION_EVENT_ACTION_HOVER_ENTER: // hover enter
case AMOTION_EVENT_ACTION_HOVER_EXIT: { // hover exit
// https://developer.android.com/reference/android/view/MotionEvent.html#ACTION_HOVER_ENTER
Ref<InputEventMouseMotion> ev;
ev.instantiate();
_set_key_modifier_state(ev, Key::NONE);
ev->set_position(p_event_pos);
ev->set_global_position(p_event_pos);
ev->set_relative(p_event_pos - hover_prev_pos);
ev->set_relative_screen_position(ev->get_relative());
Input::get_singleton()->parse_input_event(ev);
hover_prev_pos = p_event_pos;
} break;
case AMOTION_EVENT_ACTION_DOWN:
case AMOTION_EVENT_ACTION_BUTTON_PRESS: {
// Release any remaining touches or mouse event
_release_mouse_event_info();
_release_all_touch();
mouse_event_info.valid = true;
mouse_event_info.pos = p_event_pos;
_parse_mouse_event_info(event_buttons_mask, true, false, p_double_click, p_source_mouse_relative);
} break;
case AMOTION_EVENT_ACTION_CANCEL: {
_cancel_mouse_event_info(p_source_mouse_relative);
} break;
case AMOTION_EVENT_ACTION_UP:
|
random
|
<|fim_prefix|> RID &p_id, int p_row_index, int p_column_index) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_row_index(ae->node, p_row_index);
accesskit_node_set_column_index(ae->node, p_column_index);
}
void AccessibilityDriverAccessKit::accessibility_update_set_table_cell_span(const RID &p_id, int p_row_span, int p_column_span) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_row_span(ae->node, p_row_span);
accesskit_node_set_column_span(ae->node, p_column_span);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_count(const RID &p_id, int p_size) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_size_of_set(ae->node, p_size);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_index(const RID &p_id, int p_index) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_position_in_set(ae->node, p_index);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_level(const RID &p_id, int p_level) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
<|fim_suffix|>
accesskit_node_set_level(ae->node, p_level);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_selected(const RID &p_id, bool p_selected) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_selected(ae->node, p_selected);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_expanded(const RID &p_id, bool p_expanded) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_expanded(ae->node, p_expanded);
}
void AccessibilityDriverAccessKit::accessibility_update_set_popup_type(const RID &p_id, DisplayServer::AccessibilityPopupType p_popup) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
switch (p_popup) {
case DisplayServer::AccessibilityPopupType::POPUP_MENU: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_MENU);
} break;
case DisplayServer::AccessibilityPopupType::POPUP_LIST: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_LISTBOX);
} break;
case DisplayServer::AccessibilityPopupType::POPUP_TREE: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_TREE);
} break;
case DisplayServer::AccessibilityPopupType::POPUP_DIALOG: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_DIALOG);
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_checked(const RID &p_id, bool p_checekd) {
ERR_FAIL_COND_MSG(!in_<|fim_middle|>AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
|
RID &p_id, int p_row_index, int p_column_index) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_row_index(ae->node, p_row_index);
accesskit_node_set_column_index(ae->node, p_column_index);
}
void AccessibilityDriverAccessKit::accessibility_update_set_table_cell_span(const RID &p_id, int p_row_span, int p_column_span) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_row_span(ae->node, p_row_span);
accesskit_node_set_column_span(ae->node, p_column_span);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_count(const RID &p_id, int p_size) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_size_of_set(ae->node, p_size);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_index(const RID &p_id, int p_index) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_position_in_set(ae->node, p_index);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_level(const RID &p_id, int p_level) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
|
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
|
accesskit_node_set_level(ae->node, p_level);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_selected(const RID &p_id, bool p_selected) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_selected(ae->node, p_selected);
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_item_expanded(const RID &p_id, bool p_expanded) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_expanded(ae->node, p_expanded);
}
void AccessibilityDriverAccessKit::accessibility_update_set_popup_type(const RID &p_id, DisplayServer::AccessibilityPopupType p_popup) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
switch (p_popup) {
case DisplayServer::AccessibilityPopupType::POPUP_MENU: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_MENU);
} break;
case DisplayServer::AccessibilityPopupType::POPUP_LIST: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_LISTBOX);
} break;
case DisplayServer::AccessibilityPopupType::POPUP_TREE: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_TREE);
} break;
case DisplayServer::AccessibilityPopupType::POPUP_DIALOG: {
accesskit_node_set_has_popup(ae->node, ACCESSKIT_HAS_POPUP_DIALOG);
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_checked(const RID &p_id, bool p_checekd) {
ERR_FAIL_COND_MSG(!in_
|
ast_based
|
<|fim_prefix|> last_num_graph_nodes_ = num_nodes;
return session_->Extend(graph_def);
}
return absl::OkStatus();
}
absl::Status ClientSession::Run(const RunOptions& run_options,
const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) const {
std::vector<std::pair<string, Tensor>> feeds;
feeds.reserve(inputs.size());
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(std::piecewise_construct,
std::forward_as_tuple(feed.first.name()),
std::forward_as_tuple(feed.second.tensor));
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata);
}
absl::Status ClientSession::Run(
const RunOptions& run_options, const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs,
RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) const {
std::vector<std::pair<string, Tensor>> feeds;
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(feed.first.name(), feed.second.tensor);
}<|fim_suffix|> }
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata,
threadpool_options);
}
absl::Status ClientSession::MakeCallable(
const CallableOptions& callable_options, CallableHandle* out_handle) {
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->MakeCallable(callable_options, out_handle);
}
absl::Status ClientSession::RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata) {
return impl()->session_->RunCallable(handle, feed_tensors, fetch_tensors,
run_metadata);
}
absl::Status ClientSession::RunCallable(
CallableHandle handle, const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& options) {
return impl()->session_->RunCallable(handle, feed_tensors, fetch_tensors,
run_metadata, options);
}
absl::Status ClientSession::ReleaseCallable(CallableHandle handle) {
return impl()->session_->ReleaseCallable(handle);
}
} // end namespace tensorflow
<|fim_middle|> std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
|
last_num_graph_nodes_ = num_nodes;
return session_->Extend(graph_def);
}
return absl::OkStatus();
}
absl::Status ClientSession::Run(const RunOptions& run_options,
const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) const {
std::vector<std::pair<string, Tensor>> feeds;
feeds.reserve(inputs.size());
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(std::piecewise_construct,
std::forward_as_tuple(feed.first.name()),
std::forward_as_tuple(feed.second.tensor));
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata);
}
absl::Status ClientSession::Run(
const RunOptions& run_options, const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs,
RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) const {
std::vector<std::pair<string, Tensor>> feeds;
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(feed.first.name(), feed.second.tensor);
}
|
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
|
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata,
threadpool_options);
}
absl::Status ClientSession::MakeCallable(
const CallableOptions& callable_options, CallableHandle* out_handle) {
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->MakeCallable(callable_options, out_handle);
}
absl::Status ClientSession::RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata) {
return impl()->session_->RunCallable(handle, feed_tensors, fetch_tensors,
run_metadata);
}
absl::Status ClientSession::RunCallable(
CallableHandle handle, const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& options) {
return impl()->session_->RunCallable(handle, feed_tensors, fetch_tensors,
run_metadata, options);
}
absl::Status ClientSession::ReleaseCallable(CallableHandle handle) {
return impl()->session_->ReleaseCallable(handle);
}
} // end namespace tensorflow
|
random
|
<|fim_prefix|>ncluding */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/config/project_settings.h"
#include "core/io/dir_access.h"
#include "core/variant/variant.h"
#include "tests/test_macros.h"
class TestProjectSettingsInternalsAccessor {
public:
static String &resource_path() {
return ProjectSettings::get_singleton()->resource_path;
}
};
namespace TestProjectSettings {
TEST_CASE("[ProjectSettings] Get existing setting") {
CHECK(ProjectSettings::get_singleton()->has_setting("application/run/main_scene"));
Variant variant = ProjectSettings::get_singleton()->get_setting("application/run/main_scene");
CHECK_EQ(variant.get_type(), Variant::STRING);
String name = variant;
CHECK_EQ(name, String());
}
TEST_CASE("[ProjectSettings] Default value is ignored if setting exists") <|fim_suffix|>
Variant variant = ProjectSettings::get_singleton()->get_setting("application/run/main_scene", "SomeDefaultValue");
CHECK_EQ(variant.get_type(), Variant::STRING);
String name = variant;
CHECK_EQ(name, String());
}
TEST_CASE("[ProjectSettings] Non existing setting is null") {
CHECK_FALSE(ProjectSettings::get_singleton()->has_setting("not_existing_setting"));
Variant variant = ProjectSettings::get_singleton()->get_setting("not_existing_setting");
CHECK_EQ(variant.get_type(), Variant::NIL);
}
TEST_CASE("[ProjectSettings] Non existing setting should return default value") {
CHECK_FALSE(ProjectSettings::get_singleton()->has_setting("not_existing_setting"));
Variant variant = ProjectSettings::get_singleton()->get_setting("not_existing_setting");
CHECK_EQ(variant.get_type(), Variant::NIL);
variant = ProjectSettings::get_singleton()->get_setting("not_existing_setting", "my_nice_default_value");
CHECK_EQ(variant.get_type(), Variant::STRING);
String name = variant;
CHECK_EQ(name, "my_nice_default_value");
CHECK_FALSE(ProjectSettings::get_singleton()->has_setting("not_existing_setting"));
}
TEST_CASE("[ProjectSettings] Set value should be returned when retrieved") {
CHECK_FALSE(ProjectSettings::get_singleton()->has_setting("my_custom_setting"));
Variant variant = ProjectSettings::get_singleton()->get_setting("my_custom_setting");
CHECK_EQ(variant.get_type(), Variant::NIL);
ProjectSettings::get_singleton()->set_setting("my_custom_setting", true);
CHECK(ProjectSettings::get_singleton()->has_setting("my_custom_setting"));
variant = ProjectSettings::get_singleton()->get_setting("my_custom_setting");
CHECK_EQ(variant.get_type(), Variant::BOOL);
bool value = variant;
CHECK_EQ(true, value);
CHECK(ProjectSettings::get_singleton()->has_setting("my_custom_setting"));
}
TEST_CASE("[ProjectSettings] localize_path") {
String old_resource_path = TestProjectSettingsInternalsAccessor::resource_path();
TestProjectSettingsInternalsAccessor::resource_pa<|fim_middle|>{
CHECK(ProjectSettings::get_singleton()->has_setting("application/run/main_scene"));
|
ncluding */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/config/project_settings.h"
#include "core/io/dir_access.h"
#include "core/variant/variant.h"
#include "tests/test_macros.h"
class TestProjectSettingsInternalsAccessor {
public:
static String &resource_path() {
return ProjectSettings::get_singleton()->resource_path;
}
};
namespace TestProjectSettings {
TEST_CASE("[ProjectSettings] Get existing setting") {
CHECK(ProjectSettings::get_singleton()->has_setting("application/run/main_scene"));
Variant variant = ProjectSettings::get_singleton()->get_setting("application/run/main_scene");
CHECK_EQ(variant.get_type(), Variant::STRING);
String name = variant;
CHECK_EQ(name, String());
}
TEST_CASE("[ProjectSettings] Default value is ignored if setting exists")
|
{
CHECK(ProjectSettings::get_singleton()->has_setting("application/run/main_scene"));
|
Variant variant = ProjectSettings::get_singleton()->get_setting("application/run/main_scene", "SomeDefaultValue");
CHECK_EQ(variant.get_type(), Variant::STRING);
String name = variant;
CHECK_EQ(name, String());
}
TEST_CASE("[ProjectSettings] Non existing setting is null") {
CHECK_FALSE(ProjectSettings::get_singleton()->has_setting("not_existing_setting"));
Variant variant = ProjectSettings::get_singleton()->get_setting("not_existing_setting");
CHECK_EQ(variant.get_type(), Variant::NIL);
}
TEST_CASE("[ProjectSettings] Non existing setting should return default value") {
CHECK_FALSE(ProjectSettings::get_singleton()->has_setting("not_existing_setting"));
Variant variant = ProjectSettings::get_singleton()->get_setting("not_existing_setting");
CHECK_EQ(variant.get_type(), Variant::NIL);
variant = ProjectSettings::get_singleton()->get_setting("not_existing_setting", "my_nice_default_value");
CHECK_EQ(variant.get_type(), Variant::STRING);
String name = variant;
CHECK_EQ(name, "my_nice_default_value");
CHECK_FALSE(ProjectSettings::get_singleton()->has_setting("not_existing_setting"));
}
TEST_CASE("[ProjectSettings] Set value should be returned when retrieved") {
CHECK_FALSE(ProjectSettings::get_singleton()->has_setting("my_custom_setting"));
Variant variant = ProjectSettings::get_singleton()->get_setting("my_custom_setting");
CHECK_EQ(variant.get_type(), Variant::NIL);
ProjectSettings::get_singleton()->set_setting("my_custom_setting", true);
CHECK(ProjectSettings::get_singleton()->has_setting("my_custom_setting"));
variant = ProjectSettings::get_singleton()->get_setting("my_custom_setting");
CHECK_EQ(variant.get_type(), Variant::BOOL);
bool value = variant;
CHECK_EQ(true, value);
CHECK(ProjectSettings::get_singleton()->has_setting("my_custom_setting"));
}
TEST_CASE("[ProjectSettings] localize_path") {
String old_resource_path = TestProjectSettingsInternalsAccessor::resource_path();
TestProjectSettingsInternalsAccessor::resource_pa
|
ast_based
|
<|fim_prefix|>// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#ifndef CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
#define CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
<|fim_suffix|>#include "toolchain/base/yaml.h"
namespace Carbon::SemIR {
// Provides a block-based ValueStore, which uses slab allocation of added
// blocks. This allows references to values to outlast vector resizes that might
// otherwise invalidate references.
//
// BlockValueStore is used as-is, but there are also children that expose the
// protected members for type-specific functionality.
template <typename IdT, typename ElementT>
class BlockValueStore : public Yaml::Printable<BlockValueStore<IdT, ElementT>> {
public:
using IdType = IdT;
using ElementType = ElementT;
using RefType = llvm::MutableArrayRef<ElementT>;
using ConstRefType = llvm::ArrayRef<ElementT>;
explicit BlockValueStore(llvm::BumpPtrAllocator& allocator)
: allocator_(&allocator) {
auto empty = RefType();
auto empty_val = canonical_blocks_.Insert(
empty, [&] { return values_.Add(empty); }, KeyContext(this));
CARBON_CHECK(empty_val.key() == IdT::Empty);
}
// Adds a block with the given content, returning an ID to reference it.
auto Add(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
return values_.Add(AllocateCopy(content));
}
// Returns the requested block.
auto Get(IdT id) const -> ConstRefType { return values_.Get(id); }
// Returns a mutable view of the requested block. This operation should be
// avoided where possible; we generally want blocks to be immutable once
// created.
auto GetMutable(IdT id) -> RefType { return values_.Get(id); }
// Returns a new block formed by applying `transform(elem_id)` to each element
// in the specified block.
template <typename TransformFnT>
auto Transform(IdT id, TransformFnT transform) -> IdT {
llvm::SmallVector<ElementType> block(llvm::map_range(Get(id), transform));
return Add(block);
}
// Adds a block or finds an existing canonical block with the given content,
// and returns an ID to reference it.<|fim_middle|>#include <type_traits>
#include "common/check.h"
#include "common/set.h"
#include "llvm/Support/Allocator.h"
#include "toolchain/base/mem_usage.h"
#include "toolchain/base/value_store.h"
|
// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#ifndef CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
#define CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
|
#include <type_traits>
#include "common/check.h"
#include "common/set.h"
#include "llvm/Support/Allocator.h"
#include "toolchain/base/mem_usage.h"
#include "toolchain/base/value_store.h"
|
#include "toolchain/base/yaml.h"
namespace Carbon::SemIR {
// Provides a block-based ValueStore, which uses slab allocation of added
// blocks. This allows references to values to outlast vector resizes that might
// otherwise invalidate references.
//
// BlockValueStore is used as-is, but there are also children that expose the
// protected members for type-specific functionality.
template <typename IdT, typename ElementT>
class BlockValueStore : public Yaml::Printable<BlockValueStore<IdT, ElementT>> {
public:
using IdType = IdT;
using ElementType = ElementT;
using RefType = llvm::MutableArrayRef<ElementT>;
using ConstRefType = llvm::ArrayRef<ElementT>;
explicit BlockValueStore(llvm::BumpPtrAllocator& allocator)
: allocator_(&allocator) {
auto empty = RefType();
auto empty_val = canonical_blocks_.Insert(
empty, [&] { return values_.Add(empty); }, KeyContext(this));
CARBON_CHECK(empty_val.key() == IdT::Empty);
}
// Adds a block with the given content, returning an ID to reference it.
auto Add(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
return values_.Add(AllocateCopy(content));
}
// Returns the requested block.
auto Get(IdT id) const -> ConstRefType { return values_.Get(id); }
// Returns a mutable view of the requested block. This operation should be
// avoided where possible; we generally want blocks to be immutable once
// created.
auto GetMutable(IdT id) -> RefType { return values_.Get(id); }
// Returns a new block formed by applying `transform(elem_id)` to each element
// in the specified block.
template <typename TransformFnT>
auto Transform(IdT id, TransformFnT transform) -> IdT {
llvm::SmallVector<ElementType> block(llvm::map_range(Get(id), transform));
return Add(block);
}
// Adds a block or finds an existing canonical block with the given content,
// and returns an ID to reference it.
|
random
|
<|fim_prefix|>).first, E->get().second);
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, newpos);
}
// 5 - (undo) Reinsert keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
Array key = animation->track_get_key_value(E->get().first, E->get().second);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
E->get().first,
oldpos,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
animation->bezier_track_get_key_handle_mode(E->get().first, E->get().second));
}
// 6 - (undo) Reinsert overlapped keys.
List<AnimMoveRestore>::ConstIterator restore_itr = to_restore.begin();
List<Animation::HandleMode>::ConstIterator handle_itr = to_restore_handle_modes.begin();
for (; restore_itr != to_restore.end() && handle_itr != to_restore_handle_modes.end(); ++restore_itr, ++handle_itr) {
const AnimMoveRestore &amr = *restore_itr;
Array key = amr.key;
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", amr.track, amr.time, amr.key, 1);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
amr.track,
amr.time,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
*handle_itr);
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// 7 - Reselect.
int i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second);
<|fim_suffix|>
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, oldpos, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
}
scaling_selection = false;
scaling_selection_scale = Vector2(1, 1);
scaling_selection_offset = Vector2();
queue_redraw();
}
Ref<InputEventMouseMotion> mm = p_event;
if (moving_selection_attempt && mm.is_valid()) {
Point2 new_pos = mm->get_position();
if (mm->is_alt_pressed()) { // Axis snap key move when alt is pressed
if (Math::abs(new_pos.x - moving_selection_mouse_begin.x) > Math::abs(new_pos.y - moving_selection_mouse_begin.y)) {
new_pos.y = moving_selection_mouse_begin.y;
} else {
new_pos.x = moving_selection_mouse_begin.x;
}
}
if (!moving_selection) {
moving_selection = true;
select_single_attempt = IntPair(-1, -1);
}
if (!read_only) {
float y = (get_size().height / 2.0 - new_pos.y) * timeline_v_zoom + timeline_v_scroll;
float moving_selection_begin_time = ((moving_selection_mouse_begin.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
float new_time = ((new_pos.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
float moving_selection_pivot = moving_selection_from_key != -1 ? animation->track_get_key_time(moving_selection_from_track, moving_selection_from_key) : 0;
float time_delta = new_time - moving_selection_begin_time;
float snapped_time = editor->snap_time(moving_selection_pivot + time_delta);
float time_offset = 0.0;
if (std::abs(moving_selection_offset.x) > CMP_EPSILON || (snapped_time > moving_selection_pivot && time_delta > CMP_EPSILON) || (snapped_time < moving_selection_pivot && time_delta < -CMP_EPSILON)) {
time_offset = snapped_time - moving_selection_pivot;
}
flo<|fim_middle|>newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
undo_redo->add_do_method(this, "_select_at_anim", animation, E->get().first, newpos, i == 0);
|
).first, E->get().second);
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, newpos);
}
// 5 - (undo) Reinsert keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
Array key = animation->track_get_key_value(E->get().first, E->get().second);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
E->get().first,
oldpos,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
animation->bezier_track_get_key_handle_mode(E->get().first, E->get().second));
}
// 6 - (undo) Reinsert overlapped keys.
List<AnimMoveRestore>::ConstIterator restore_itr = to_restore.begin();
List<Animation::HandleMode>::ConstIterator handle_itr = to_restore_handle_modes.begin();
for (; restore_itr != to_restore.end() && handle_itr != to_restore_handle_modes.end(); ++restore_itr, ++handle_itr) {
const AnimMoveRestore &amr = *restore_itr;
Array key = amr.key;
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", amr.track, amr.time, amr.key, 1);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
amr.track,
amr.time,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
*handle_itr);
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// 7 - Reselect.
int i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second);
|
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
undo_redo->add_do_method(this, "_select_at_anim", animation, E->get().first, newpos, i == 0);
|
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, oldpos, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
}
scaling_selection = false;
scaling_selection_scale = Vector2(1, 1);
scaling_selection_offset = Vector2();
queue_redraw();
}
Ref<InputEventMouseMotion> mm = p_event;
if (moving_selection_attempt && mm.is_valid()) {
Point2 new_pos = mm->get_position();
if (mm->is_alt_pressed()) { // Axis snap key move when alt is pressed
if (Math::abs(new_pos.x - moving_selection_mouse_begin.x) > Math::abs(new_pos.y - moving_selection_mouse_begin.y)) {
new_pos.y = moving_selection_mouse_begin.y;
} else {
new_pos.x = moving_selection_mouse_begin.x;
}
}
if (!moving_selection) {
moving_selection = true;
select_single_attempt = IntPair(-1, -1);
}
if (!read_only) {
float y = (get_size().height / 2.0 - new_pos.y) * timeline_v_zoom + timeline_v_scroll;
float moving_selection_begin_time = ((moving_selection_mouse_begin.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
float new_time = ((new_pos.x - limit) / timeline->get_zoom_scale()) + timeline->get_value();
float moving_selection_pivot = moving_selection_from_key != -1 ? animation->track_get_key_time(moving_selection_from_track, moving_selection_from_key) : 0;
float time_delta = new_time - moving_selection_begin_time;
float snapped_time = editor->snap_time(moving_selection_pivot + time_delta);
float time_offset = 0.0;
if (std::abs(moving_selection_offset.x) > CMP_EPSILON || (snapped_time > moving_selection_pivot && time_delta > CMP_EPSILON) || (snapped_time < moving_selection_pivot && time_delta < -CMP_EPSILON)) {
time_offset = snapped_time - moving_selection_pivot;
}
flo
|
ast_based
|
<|fim_prefix|>d_icon_item(get_editor_theme_icon(SNAME("BezierHandlesLinear")), TTR("Make Handles Linear"), MENU_KEY_SET_HANDLE_LINEAR);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesBalanced")), TTR("Make Handles Balanced"), MENU_KEY_SET_HANDLE_BALANCED);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesMirror")), TTR("Make Handles Mirrored"), MENU_KEY_SET_HANDLE_MIRRORED);
menu->add_separator();
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesBalanced")), TTR("Make Handles Balanced (Auto Tangent)"), MENU_KEY_SET_HANDLE_AUTO_BALANCED);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesMirror")), TTR("Make Handles Mirrored (Auto Tangent)"), MENU_KEY_SET_HANDLE_AUTO_MIRRORED);
}
if (menu->get_item_count()) {
menu->reset_size();
menu->set_position(popup_pos);
menu->popup();
}
}
}
}
if (mb.is_valid() && mb->is_pressed() && mb->get_button_index() == MouseButton::LEFT) {
Point2 pos = mb->get_position();
bool no_mod_key_pressed = !mb->is_alt_pressed() && !mb->is_shift_pressed() && !mb->is_command_or_control_pressed();
if (mb->is_double_click() && !moving_selection && no_mod_key_pressed) {
int x = pos.x - timeline->get_name_limit();
float ofs = x / timeline->get_zoom_scale() + timeline->get_value();
emit_signal(SNAME("timeline_changed"), ofs, false);
}
for (const KeyValue<int, Rect2> &E : subtracks) {
if (E.value.has_point(mb->get_position())) {
if (!locked_tracks.has(E.key) && !hidden_tracks.has(E.key)) {
set_animation_and_track(animation, E.key, read_only);
_clear_selection();
}
return;
}
}
for (const KeyValue<int, RBMap<int, Rect2>> &E : subtrack_icons) {
int track = E.key;
RBMap<int, Rect2> track_icons = E.value;
for (const KeyValue<int, Rect2> &I : track_icons) {
if (I.value.has_point(mb->get_position())) {
if (I.key == REMOVE_ICON) {
if (!read_only) {
EditorUndoRedoManager *undo_redo = <|fim_suffix|>;
undo_redo->create_action("Remove Bezier Track", UndoRedo::MERGE_DISABLE, animation.ptr());
undo_redo->add_do_method(this, "_update_locked_tracks_after", track);
undo_redo->add_do_method(this, "_update_hidden_tracks_after", track);
undo_redo->add_do_method(animation.ptr(), "remove_track", track);
undo_redo->add_undo_method(animation.ptr(), "add_track", Animation::TrackType::TYPE_BEZIER, track);
undo_redo->add_undo_method(animation.ptr(), "track_set_path", track, animation->track_get_path(track));
for (int i = 0; i < animation->track_get_key_count(track); ++i) {
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
track,
animation->track_get_key_time(track, i),
animation->bezier_track_get_key_value(track, i),
animation->bezier_track_get_key_in_handle(track, i),
animation->bezier_track_get_key_out_handle(track, i),
animation->bezier_track_get_key_handle_mode(track, i));
}
undo_redo->commit_action();
selected_track = CLAMP(selected_track, 0, animation->get_track_count() - 1);
}
return;
} else if (I.key == LOCK_ICON) {
if (locked_tracks.has(track)) {
locked_tracks.erase(track);
} else {
locked_tracks.insert(track);
if (selected_track == track) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (!locked_tracks.has(i) && animation->track_get_type(i) == Animation::TrackType::TYPE_BEZIER) {
set_animation_and_track(animation, i, read_only);
break;
}
}
}
}
queue_redraw();
return;
} else if (I.key == VISIBILITY_ICON) {
if (hidden_tracks.has(track)) {
hidden_tracks.erase(track);
} else {
hidden_tracks.insert(track);
if (selected_track == track) {
for (int i = 0; i < animation->get_track_count(); <|fim_middle|>EditorUndoRedoManager::get_singleton()
|
d_icon_item(get_editor_theme_icon(SNAME("BezierHandlesLinear")), TTR("Make Handles Linear"), MENU_KEY_SET_HANDLE_LINEAR);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesBalanced")), TTR("Make Handles Balanced"), MENU_KEY_SET_HANDLE_BALANCED);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesMirror")), TTR("Make Handles Mirrored"), MENU_KEY_SET_HANDLE_MIRRORED);
menu->add_separator();
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesBalanced")), TTR("Make Handles Balanced (Auto Tangent)"), MENU_KEY_SET_HANDLE_AUTO_BALANCED);
menu->add_icon_item(get_editor_theme_icon(SNAME("BezierHandlesMirror")), TTR("Make Handles Mirrored (Auto Tangent)"), MENU_KEY_SET_HANDLE_AUTO_MIRRORED);
}
if (menu->get_item_count()) {
menu->reset_size();
menu->set_position(popup_pos);
menu->popup();
}
}
}
}
if (mb.is_valid() && mb->is_pressed() && mb->get_button_index() == MouseButton::LEFT) {
Point2 pos = mb->get_position();
bool no_mod_key_pressed = !mb->is_alt_pressed() && !mb->is_shift_pressed() && !mb->is_command_or_control_pressed();
if (mb->is_double_click() && !moving_selection && no_mod_key_pressed) {
int x = pos.x - timeline->get_name_limit();
float ofs = x / timeline->get_zoom_scale() + timeline->get_value();
emit_signal(SNAME("timeline_changed"), ofs, false);
}
for (const KeyValue<int, Rect2> &E : subtracks) {
if (E.value.has_point(mb->get_position())) {
if (!locked_tracks.has(E.key) && !hidden_tracks.has(E.key)) {
set_animation_and_track(animation, E.key, read_only);
_clear_selection();
}
return;
}
}
for (const KeyValue<int, RBMap<int, Rect2>> &E : subtrack_icons) {
int track = E.key;
RBMap<int, Rect2> track_icons = E.value;
for (const KeyValue<int, Rect2> &I : track_icons) {
if (I.value.has_point(mb->get_position())) {
if (I.key == REMOVE_ICON) {
if (!read_only) {
EditorUndoRedoManager *undo_redo =
|
EditorUndoRedoManager::get_singleton()
|
;
undo_redo->create_action("Remove Bezier Track", UndoRedo::MERGE_DISABLE, animation.ptr());
undo_redo->add_do_method(this, "_update_locked_tracks_after", track);
undo_redo->add_do_method(this, "_update_hidden_tracks_after", track);
undo_redo->add_do_method(animation.ptr(), "remove_track", track);
undo_redo->add_undo_method(animation.ptr(), "add_track", Animation::TrackType::TYPE_BEZIER, track);
undo_redo->add_undo_method(animation.ptr(), "track_set_path", track, animation->track_get_path(track));
for (int i = 0; i < animation->track_get_key_count(track); ++i) {
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
track,
animation->track_get_key_time(track, i),
animation->bezier_track_get_key_value(track, i),
animation->bezier_track_get_key_in_handle(track, i),
animation->bezier_track_get_key_out_handle(track, i),
animation->bezier_track_get_key_handle_mode(track, i));
}
undo_redo->commit_action();
selected_track = CLAMP(selected_track, 0, animation->get_track_count() - 1);
}
return;
} else if (I.key == LOCK_ICON) {
if (locked_tracks.has(track)) {
locked_tracks.erase(track);
} else {
locked_tracks.insert(track);
if (selected_track == track) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (!locked_tracks.has(i) && animation->track_get_type(i) == Animation::TrackType::TYPE_BEZIER) {
set_animation_and_track(animation, i, read_only);
break;
}
}
}
}
queue_redraw();
return;
} else if (I.key == VISIBILITY_ICON) {
if (hidden_tracks.has(track)) {
hidden_tracks.erase(track);
} else {
hidden_tracks.insert(track);
if (selected_track == track) {
for (int i = 0; i < animation->get_track_count();
|
ast_based
|