| text (string, 435-1.65k chars) | prefix (string, 51-700 chars) | middle (string, 10-200 chars) | suffix (string, 50-700 chars) | type (string, 2 classes) |
|---|---|---|---|---|
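Each row stores the same snippet in two forms: the `text` column holds the FIM-formatted training string, while the `prefix`, `middle`, and `suffix` columns hold its three parts; `type` appears to record how the middle span was selected (`random` or `ast_based`). As a minimal sketch, not part of any dataset tooling, the snippet below reassembles a `text` field from the other columns using only the marker order visible in the rows (`<fim_prefix>`, then `<fim_suffix>`, then `<fim_middle>`, ending with `<fim_end>`); the function and helper names are illustrative assumptions.

```python
# Minimal sketch (illustrative, not from the dataset tooling): rebuild the
# `text` column from `prefix`, `middle`, and `suffix` using the markers seen
# in the preview rows. The rows place the suffix before the middle (PSM layout).

FIM_PREFIX = "<fim_prefix>"
FIM_SUFFIX = "<fim_suffix>"
FIM_MIDDLE = "<fim_middle>"
FIM_END    = "<fim_end>"

def assemble_fim_text(prefix: str, middle: str, suffix: str) -> str:
    """Build a FIM training string in the same layout as the `text` column."""
    return f"{FIM_PREFIX}{prefix}{FIM_SUFFIX}{suffix}{FIM_MIDDLE}{middle}{FIM_END}"

def row_is_consistent(row: dict) -> bool:
    """Check that a row's `text` matches its parts and carries a known `type`."""
    rebuilt = assemble_fim_text(row["prefix"], row["middle"], row["suffix"])
    return row["text"] == rebuilt and row["type"] in {"random", "ast_based"}
```

A check like this lets a consumer confirm that the three part columns and the assembled `text` column stay in sync before training on the rows shown below.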
<fim_prefix> item_rect_changed();
}
bool AnimatedSprite2D::is_centered() const {
return centered;
}
void AnimatedSprite2D::set_offset(const Point2 &p_offset) {
if (offset == p_offset) {
return;
}
offset = p_offset;
queue_redraw();
item_rect_changed();
}
Point2 AnimatedSprite2D::get_offset() const {
return offset;
}
void AnimatedSprite2D::set_flip_h(bool p_flip) {
if (hflip == p_flip) {
return;
}
hflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_h() const {
return hflip;
}
void AnimatedSprite2D::set_flip_v(bool p_flip) {
if (vflip == p_flip) {
return;
}
vflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_v() const {
return vflip;
}<fim_suffix> set_frame_and_progress(frame, frame_progress);
queue_redraw();
notify_property_list_changed();
}
bool AnimatedSprite2D::is_playing() const {
return playing;
}
void AnimatedSprite2D::set_autoplay(const String &p_name) {
if (is_inside_tree() && !Engine::get_singleton()->is_editor_hint()) {
WARN_PRINT("Setting autoplay after the node has been added to the scene has no effect.");
}
autoplay = p_name;
}
String AnimatedSprite2D::get_autoplay() const {
return autoplay;
}
void AnimatedSprite2D::play(const StringName &p_name, float p_custom_scale, bool p_from_end) {
StringName name = p_name;
if (name == StringName()) {
name = animation;
}
<fim_middle>
void AnimatedSprite2D::_res_changed() {<fim_end>
|
item_rect_changed();
}
bool AnimatedSprite2D::is_centered() const {
return centered;
}
void AnimatedSprite2D::set_offset(const Point2 &p_offset) {
if (offset == p_offset) {
return;
}
offset = p_offset;
queue_redraw();
item_rect_changed();
}
Point2 AnimatedSprite2D::get_offset() const {
return offset;
}
void AnimatedSprite2D::set_flip_h(bool p_flip) {
if (hflip == p_flip) {
return;
}
hflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_h() const {
return hflip;
}
void AnimatedSprite2D::set_flip_v(bool p_flip) {
if (vflip == p_flip) {
return;
}
vflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_v() const {
return vflip;
}
|
void AnimatedSprite2D::_res_changed() {
|
set_frame_and_progress(frame, frame_progress);
queue_redraw();
notify_property_list_changed();
}
bool AnimatedSprite2D::is_playing() const {
return playing;
}
void AnimatedSprite2D::set_autoplay(const String &p_name) {
if (is_inside_tree() && !Engine::get_singleton()->is_editor_hint()) {
WARN_PRINT("Setting autoplay after the node has been added to the scene has no effect.");
}
autoplay = p_name;
}
String AnimatedSprite2D::get_autoplay() const {
return autoplay;
}
void AnimatedSprite2D::play(const StringName &p_name, float p_custom_scale, bool p_from_end) {
StringName name = p_name;
if (name == StringName()) {
name = animation;
}
|
random
|
<fim_prefix>f_tmp.push_back(empty);
auto t = ggml_new_tensor_1d(ctx_ggml, GGML_TYPE_F32, n_embd);
t->data = malloc(ggml_nbytes(t)); // TODO: get rid of malloc if possible
v_final.push_back(t);
}
}
// add new rows into existing tensor in v_diff_tmp
void concat_diff_tmp(const std::vector<struct ggml_tensor *> & diff_filtered) {
GGML_ASSERT((int) diff_filtered.size() == n_layers - 1);
for (int il = 0; il < n_layers - 1; il++) {
auto t = diff_filtered[il];
auto & diff_tmp = v_diff_tmp[il];
size_t curr_size = diff_tmp.size();
diff_tmp.resize(curr_size + ggml_nbytes(t));
memcpy(<fim_suffix> + curr_size, t->data, ggml_nbytes(t));
}
}
// build the v_diff tensors from v_diff_tmp (v_diff need to be transposed)
// TODO @ngxson : maybe add option NOT to transpose v_diff; will be useful for "mean" method
void build_v_diff(bool transpose) {
printf("build_v_diff\n");
for (int il = 0; il < n_layers - 1; il++) {
auto & diff_tmp = v_diff_tmp[il];
int n_elem = diff_tmp.size() / sizeof(float);
GGML_ASSERT(n_elem % n_embd == 0);
int n_rows = n_elem / n_embd;
struct ggml_tensor * diff = transpose
? ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd)
: gg<fim_middle>diff_tmp.data()<fim_end>
|
f_tmp.push_back(empty);
auto t = ggml_new_tensor_1d(ctx_ggml, GGML_TYPE_F32, n_embd);
t->data = malloc(ggml_nbytes(t)); // TODO: get rid of malloc if possible
v_final.push_back(t);
}
}
// add new rows into existing tensor in v_diff_tmp
void concat_diff_tmp(const std::vector<struct ggml_tensor *> & diff_filtered) {
GGML_ASSERT((int) diff_filtered.size() == n_layers - 1);
for (int il = 0; il < n_layers - 1; il++) {
auto t = diff_filtered[il];
auto & diff_tmp = v_diff_tmp[il];
size_t curr_size = diff_tmp.size();
diff_tmp.resize(curr_size + ggml_nbytes(t));
memcpy(
|
diff_tmp.data()
|
+ curr_size, t->data, ggml_nbytes(t));
}
}
// build the v_diff tensors from v_diff_tmp (v_diff need to be transposed)
// TODO @ngxson : maybe add option NOT to transpose v_diff; will be useful for "mean" method
void build_v_diff(bool transpose) {
printf("build_v_diff\n");
for (int il = 0; il < n_layers - 1; il++) {
auto & diff_tmp = v_diff_tmp[il];
int n_elem = diff_tmp.size() / sizeof(float);
GGML_ASSERT(n_elem % n_embd == 0);
int n_rows = n_elem / n_embd;
struct ggml_tensor * diff = transpose
? ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd)
: gg
|
ast_based
|
<fim_prefix>
const char * fn_llama2c_output_model;
const char * fn_train_data;
const char * fn_checkpoint_in;
const char * fn_checkpoint_out;
const char * fn_model_out;
uint32_t seed;
int n_ctx;
int n_embd;
int n_mult;
int n_head;
int n_layer;
int n_rotmax;
int n_threads;
int n_batch;
int n_examples;
int n_predict;
int print_info_interval;
int print_details_interval;
bool samples_start_after_nl;
bool use_adam;
bool use_flash;
bool use_scratch;
// only adam
int warmup;
int cos_decay_steps;
float cos_decay_restart;
float cos_decay_alpha;
int lbfgs_n_iter;
int adam_n_iter;
<fim_suffix>
float adam_decay;
int mem_model_gb;
int mem_compute_gb;
int mem_compute0_gb;
int mem_compute1_gb;
};
static void print_params(struct my_llama_hparams * params) {
LOG_INF("%s: n_vocab: %u\n", __func__, params->n_vocab);
LOG_INF("%s: n_ctx: %u\n", __func__, params->n_ctx);
LOG_INF("%s: n_embd: %u\n", __func__, params->n_embd);
LOG_INF("%s: n_mult: %u\n", __func__, params->n_mult);
LOG_INF("%s: n_head: %u\n", __func__, params->n_head);
LOG_INF("%s: n_head_kv: %u\n", __func__, params->n_head_kv);
LOG_INF("%s: n_ff: %u\n", __func__, params->n_ff);
LOG_INF("%s: n_layer: %u\n", __func__, params->n_layer);
LOG_INF("%s: <fim_middle>float adam_alpha;<fim_end>
|
const char * fn_llama2c_output_model;
const char * fn_train_data;
const char * fn_checkpoint_in;
const char * fn_checkpoint_out;
const char * fn_model_out;
uint32_t seed;
int n_ctx;
int n_embd;
int n_mult;
int n_head;
int n_layer;
int n_rotmax;
int n_threads;
int n_batch;
int n_examples;
int n_predict;
int print_info_interval;
int print_details_interval;
bool samples_start_after_nl;
bool use_adam;
bool use_flash;
bool use_scratch;
// only adam
int warmup;
int cos_decay_steps;
float cos_decay_restart;
float cos_decay_alpha;
int lbfgs_n_iter;
int adam_n_iter;
|
float adam_alpha;
|
float adam_decay;
int mem_model_gb;
int mem_compute_gb;
int mem_compute0_gb;
int mem_compute1_gb;
};
static void print_params(struct my_llama_hparams * params) {
LOG_INF("%s: n_vocab: %u\n", __func__, params->n_vocab);
LOG_INF("%s: n_ctx: %u\n", __func__, params->n_ctx);
LOG_INF("%s: n_embd: %u\n", __func__, params->n_embd);
LOG_INF("%s: n_mult: %u\n", __func__, params->n_mult);
LOG_INF("%s: n_head: %u\n", __func__, params->n_head);
LOG_INF("%s: n_head_kv: %u\n", __func__, params->n_head_kv);
LOG_INF("%s: n_ff: %u\n", __func__, params->n_ff);
LOG_INF("%s: n_layer: %u\n", __func__, params->n_layer);
LOG_INF("%s:
|
ast_based
|
<fim_prefix>.result);
}
TEST_SUCCESS();
}
template<std::invocable<hex::prv::Provider *&, u64, size_t, u32, u32, u32, bool, bool> Func>
int checkCrcAgainstRandomData(Func func, int width) {
// crc( message + crc(message) ) should be 0
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution distribLen(0, 1024);
std::uniform_int_distribution<uint64_t> distribPoly(0, (0b10ull << (width - 1)) - 1);
std::uniform_int_distribution<u8> distribData;
for (int i = 0; i < 500; i++) {
CrcCheck c { "", width, distribPoly(gen), distribPoly(gen), 0, false, false, 0, {} };
c.data.resize(distribLen(gen));
std::generate(std::begin(c.data), <fim_suffix>, [&] { return distribData(gen); });
hex::test::TestProvider testprovider(&c.data);
hex::prv::Provider *provider = &testprovider;
u32 crc1 = func(provider, 0, c.data.size(), c.poly, c.init, c.xorOut, c.refIn, c.refOut);
std::vector<u8> data2 = c.data;
if (width >= 32) {
data2.push_back((crc1 >> 24) & 0xff);
data2.push_back((crc1 >> 16) & 0xff);
}
if (width >= 16)
data2.push_back((crc1 >> 8) & 0xff);
data2.push_back((crc1 >> 0) & 0xff);
hex::test::TestProvider testprovider2(&data2);
hex::prv::Provider *provider2 = &testprovider2;
u32 crc2 <fim_middle>std::end(c.data)<fim_end>
|
.result);
}
TEST_SUCCESS();
}
template<std::invocable<hex::prv::Provider *&, u64, size_t, u32, u32, u32, bool, bool> Func>
int checkCrcAgainstRandomData(Func func, int width) {
// crc( message + crc(message) ) should be 0
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution distribLen(0, 1024);
std::uniform_int_distribution<uint64_t> distribPoly(0, (0b10ull << (width - 1)) - 1);
std::uniform_int_distribution<u8> distribData;
for (int i = 0; i < 500; i++) {
CrcCheck c { "", width, distribPoly(gen), distribPoly(gen), 0, false, false, 0, {} };
c.data.resize(distribLen(gen));
std::generate(std::begin(c.data),
|
std::end(c.data)
|
, [&] { return distribData(gen); });
hex::test::TestProvider testprovider(&c.data);
hex::prv::Provider *provider = &testprovider;
u32 crc1 = func(provider, 0, c.data.size(), c.poly, c.init, c.xorOut, c.refIn, c.refOut);
std::vector<u8> data2 = c.data;
if (width >= 32) {
data2.push_back((crc1 >> 24) & 0xff);
data2.push_back((crc1 >> 16) & 0xff);
}
if (width >= 16)
data2.push_back((crc1 >> 8) & 0xff);
data2.push_back((crc1 >> 0) & 0xff);
hex::test::TestProvider testprovider2(&data2);
hex::prv::Provider *provider2 = &testprovider2;
u32 crc2
|
ast_based
|
<fim_prefix> return m_temporary;
}
/**
* @brief Sets whether the achievement is unlocked
* @param unlocked Whether the achievement is unlocked
*/
void setUnlocked(bool unlocked) {
if (unlocked) {
if (m_progress < m_maxProgress)
m_progress++;
} else {
m_progress = 0;
}
}
protected:
void setProgress(u32 progress) {
m_progress = progress;
}
private:
UnlocalizedString m_unlocalizedCategory, m_unlocalizedName;
UnlocalizedString m_unlocalizedDescription;
bool m_blacked = false;<fim_suffix> std::vector<std::string> m_requirements, m_visibilityRequirements;
std::function<void(Achievement &)> m_clickCallback;
std::vector<u8> m_iconData;
mutable ImGuiExt::Texture m_icon;
u32 m_progress = 0;
u32 m_maxProgress = 1;
bool m_temporary = false;
friend class AchievementManager;
};
class AchievementManager {
static bool s_initialized;
public:
AchievementManager() = delete;
struct AchievementNode {
Achievement *achievement;
std::vector<AchievementNode*> children, parents;
std::vector<AchievementNode*> visibilityParents;
ImVec2 position;<fim_middle> bool m_invisible = false;<fim_end>
|
return m_temporary;
}
/**
* @brief Sets whether the achievement is unlocked
* @param unlocked Whether the achievement is unlocked
*/
void setUnlocked(bool unlocked) {
if (unlocked) {
if (m_progress < m_maxProgress)
m_progress++;
} else {
m_progress = 0;
}
}
protected:
void setProgress(u32 progress) {
m_progress = progress;
}
private:
UnlocalizedString m_unlocalizedCategory, m_unlocalizedName;
UnlocalizedString m_unlocalizedDescription;
bool m_blacked = false;
|
bool m_invisible = false;
|
std::vector<std::string> m_requirements, m_visibilityRequirements;
std::function<void(Achievement &)> m_clickCallback;
std::vector<u8> m_iconData;
mutable ImGuiExt::Texture m_icon;
u32 m_progress = 0;
u32 m_maxProgress = 1;
bool m_temporary = false;
friend class AchievementManager;
};
class AchievementManager {
static bool s_initialized;
public:
AchievementManager() = delete;
struct AchievementNode {
Achievement *achievement;
std::vector<AchievementNode*> children, parents;
std::vector<AchievementNode*> visibilityParents;
ImVec2 position;
|
random
|
<fim_prefix> }
// 6 - (undo) Reinsert overlapped keys.
List<AnimMoveRestore>::ConstIterator restore_itr = to_restore.begin();
List<Animation::HandleMode>::ConstIterator handle_itr = to_restore_handle_modes.begin();
for (; restore_itr != to_restore.end() && handle_itr != to_restore_handle_modes.end(); ++restore_itr, ++handle_itr) {
const AnimMoveRestore &amr = *restore_itr;
Array key = amr.key;
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", amr.track, amr.time, amr.key, 1);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,<fim_suffix> *handle_itr);
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// 7 - Reselect.
int i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
real_t newpos = oldpos + moving_selection_offset.x;
undo_redo->add_do_method(this, "_select_at_anim", animation, E->get().first, newpos, i == 0);
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, oldpos, i == 0);
i++;
}
<fim_middle> amr.track,
amr.time,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),<fim_end>
|
}
// 6 - (undo) Reinsert overlapped keys.
List<AnimMoveRestore>::ConstIterator restore_itr = to_restore.begin();
List<Animation::HandleMode>::ConstIterator handle_itr = to_restore_handle_modes.begin();
for (; restore_itr != to_restore.end() && handle_itr != to_restore_handle_modes.end(); ++restore_itr, ++handle_itr) {
const AnimMoveRestore &amr = *restore_itr;
Array key = amr.key;
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", amr.track, amr.time, amr.key, 1);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
|
amr.track,
amr.time,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
|
*handle_itr);
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// 7 - Reselect.
int i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
real_t newpos = oldpos + moving_selection_offset.x;
undo_redo->add_do_method(this, "_select_at_anim", animation, E->get().first, newpos, i == 0);
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, oldpos, i == 0);
i++;
}
|
random
|
<fim_prefix> return -1;
}
tesseract_->PrepareForPageseg();
#ifndef DISABLED_LEGACY_ENGINE
if (tesseract_->textord_equation_detect) {
if (equ_detect_ == nullptr && !datapath_.empty()) {
equ_detect_ = new EquationDetect(datapath_.c_str(), nullptr);
}
if (equ_detect_ == nullptr) {
tprintf("Warning: Could not set equation detector\n");
} else {
tesseract_->SetEquationDetect(equ_detect_);
}
}
#endif // ndef DISABLED_LEGACY_ENGINE
Tesseract *osd_tess = osd_tesseract_;
OSResults osr;
#ifndef DISABLED_LEGACY_ENGINE
if (PSM_OSD_ENABLED(tesseract_->tessedit_pageseg_mode) && osd_tess == nullptr) {
if (strcmp(language_.c_str(), "osd") == 0) {<fim_suffix> if (datapath_.empty()) {
tprintf(
"Warning: Auto orientation and script detection requested,"
" but data path is undefined\n");
delete osd_tesseract_;
osd_tesseract_ = nullptr;
} else if (osd_tesseract_->init_tesseract(datapath_, "", "osd", OEM_TESSERACT_ONLY,
nullptr, 0, nullptr, nullptr, false, &mgr) == 0) {
osd_tess = osd_tesseract_;
osd_tesseract_->set_source_resolution(thresholder_->GetSourceYResolution());
} else {
tprintf(
"Warning: Auto orientation and script detection requested,"
" but osd language failed to load\n");<fim_middle> osd_tess = tesseract_;
} else {
osd_tesseract_ = new Tesseract;
TessdataManager mgr(reader_);<fim_end>
|
return -1;
}
tesseract_->PrepareForPageseg();
#ifndef DISABLED_LEGACY_ENGINE
if (tesseract_->textord_equation_detect) {
if (equ_detect_ == nullptr && !datapath_.empty()) {
equ_detect_ = new EquationDetect(datapath_.c_str(), nullptr);
}
if (equ_detect_ == nullptr) {
tprintf("Warning: Could not set equation detector\n");
} else {
tesseract_->SetEquationDetect(equ_detect_);
}
}
#endif // ndef DISABLED_LEGACY_ENGINE
Tesseract *osd_tess = osd_tesseract_;
OSResults osr;
#ifndef DISABLED_LEGACY_ENGINE
if (PSM_OSD_ENABLED(tesseract_->tessedit_pageseg_mode) && osd_tess == nullptr) {
if (strcmp(language_.c_str(), "osd") == 0) {
|
osd_tess = tesseract_;
} else {
osd_tesseract_ = new Tesseract;
TessdataManager mgr(reader_);
|
if (datapath_.empty()) {
tprintf(
"Warning: Auto orientation and script detection requested,"
" but data path is undefined\n");
delete osd_tesseract_;
osd_tesseract_ = nullptr;
} else if (osd_tesseract_->init_tesseract(datapath_, "", "osd", OEM_TESSERACT_ONLY,
nullptr, 0, nullptr, nullptr, false, &mgr) == 0) {
osd_tess = osd_tesseract_;
osd_tesseract_->set_source_resolution(thresholder_->GetSourceYResolution());
} else {
tprintf(
"Warning: Auto orientation and script detection requested,"
" but osd language failed to load\n");
|
random
|
<fim_prefix>
ggml_set_name(layer.ffn_norm, (layers_i + ".ffn_norm.weight").c_str());
ggml_format_name(layer.w1, "%s.feed_forward.w1.weight", layers_i.c_str());
ggml_format_name(layer.w2, "%s.feed_forward.w2.weight", layers_i.c_str());
ggml_format_name(layer.w3, "%s.feed_forward.w3.weight", layers_i.c_str());
}
print_tensor_info(ctx);
}
static float get_f32_2d(struct ggml_tensor * tensor, int64_t i0, int64_t i1) {
float * ptr = (float *) ((char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1]);
return *ptr;
}
static int32_t get_i32_2d(struct ggml_tensor * tensor, int64_t i0, int64_t i1) {<fim_suffix>
static void print_row(struct ggml_tensor * probs, int i) {
for (int k = 0; k < probs->ne[0]; ++k) {
float p = get_f32_2d(probs, k, i);
LOG(" %f", p);
}
LOG("\n");
}
static void print_matrix(struct ggml_tensor * probs) {
assert(ggml_is_matrix(probs));
for (int i = 0; i < probs->ne[1]; ++i) {
for (int k = 0; k < probs->ne[0]; ++k) {
float p = get_f32_2d(probs, k, i);
LOG(" %.2f", p);
}
LOG("\n");
}
}
struct my_llama_file {
// use FILE * so we don't have to re-open the file to mmap
FILE * fp;
size_t size;
my_llama_file(const char * fname, const char * mode) {<fim_middle> int32_t * ptr = (int32_t *) ((char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1]);
return *ptr;
}<fim_end>
|
ggml_set_name(layer.ffn_norm, (layers_i + ".ffn_norm.weight").c_str());
ggml_format_name(layer.w1, "%s.feed_forward.w1.weight", layers_i.c_str());
ggml_format_name(layer.w2, "%s.feed_forward.w2.weight", layers_i.c_str());
ggml_format_name(layer.w3, "%s.feed_forward.w3.weight", layers_i.c_str());
}
print_tensor_info(ctx);
}
static float get_f32_2d(struct ggml_tensor * tensor, int64_t i0, int64_t i1) {
float * ptr = (float *) ((char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1]);
return *ptr;
}
static int32_t get_i32_2d(struct ggml_tensor * tensor, int64_t i0, int64_t i1) {
|
int32_t * ptr = (int32_t *) ((char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1]);
return *ptr;
}
|
static void print_row(struct ggml_tensor * probs, int i) {
for (int k = 0; k < probs->ne[0]; ++k) {
float p = get_f32_2d(probs, k, i);
LOG(" %f", p);
}
LOG("\n");
}
static void print_matrix(struct ggml_tensor * probs) {
assert(ggml_is_matrix(probs));
for (int i = 0; i < probs->ne[1]; ++i) {
for (int k = 0; k < probs->ne[0]; ++k) {
float p = get_f32_2d(probs, k, i);
LOG(" %.2f", p);
}
LOG("\n");
}
}
struct my_llama_file {
// use FILE * so we don't have to re-open the file to mmap
FILE * fp;
size_t size;
my_llama_file(const char * fname, const char * mode) {
|
random
|
<fim_prefix> i);
gguf_add_tensor(ctx, layer.ffn_norm);
}
gguf_write_to_file(ctx, filename, false);
gguf_free(ctx);
}
static struct train_params get_default_train_params() {
struct train_params params;
params.fn_vocab_model = "models/7B/ggml-model-f16.gguf";
params.fn_llama2c_output_model = "ak_llama_model.bin";
params.fn_train_data = "shakespeare.txt";
params.fn_checkpoint_in = "checkpoint.bin";
params.fn_checkpoint_out = "checkpoint.bin";
params.fn_model_out = "ggml-checkpoint-f32.bin";
params.seed = -1;
params.n_ctx = 128;
params.n_embd = 256;
params.n_mult = 256;
<fim_suffix>;
params.n_layer = 16;
params.n_rotmax = 64;
params.n_threads = 6;
params.n_batch = 8;
params.n_examples = 8;
params.n_predict = 1024;
params.print_info_interval = 1;
params.print_details_interval = 2;
params.samples_start_after_nl = false;
params.use_adam = true;
params.use_flash = false;
params.use_scratch = true;
// only adam
params.warmup = 100;
params.cos_decay_steps = 1000;
params.cos_decay_restart = 1.1f;
params.cos_decay_alpha = 0.0f;
params.lbfgs_n_iter = 16;
params.adam_n_iter = 16;
params.adam_alpha =<fim_middle>params.n_head = 8<fim_end>
|
i);
gguf_add_tensor(ctx, layer.ffn_norm);
}
gguf_write_to_file(ctx, filename, false);
gguf_free(ctx);
}
static struct train_params get_default_train_params() {
struct train_params params;
params.fn_vocab_model = "models/7B/ggml-model-f16.gguf";
params.fn_llama2c_output_model = "ak_llama_model.bin";
params.fn_train_data = "shakespeare.txt";
params.fn_checkpoint_in = "checkpoint.bin";
params.fn_checkpoint_out = "checkpoint.bin";
params.fn_model_out = "ggml-checkpoint-f32.bin";
params.seed = -1;
params.n_ctx = 128;
params.n_embd = 256;
params.n_mult = 256;
|
params.n_head = 8
|
;
params.n_layer = 16;
params.n_rotmax = 64;
params.n_threads = 6;
params.n_batch = 8;
params.n_examples = 8;
params.n_predict = 1024;
params.print_info_interval = 1;
params.print_details_interval = 2;
params.samples_start_after_nl = false;
params.use_adam = true;
params.use_flash = false;
params.use_scratch = true;
// only adam
params.warmup = 100;
params.cos_decay_steps = 1000;
params.cos_decay_restart = 1.1f;
params.cos_decay_alpha = 0.0f;
params.lbfgs_n_iter = 16;
params.adam_n_iter = 16;
params.adam_alpha =
|
ast_based
|
<fim_prefix> const std::unique_ptr<const PageIterator> it(AnalyseLayout());
if (it == nullptr) {
return false;
}
int x1, x2, y1, y2;
it->Baseline(RIL_TEXTLINE, &x1, &y1, &x2, &y2);
// Calculate offset and slope (NOTE: Kind of ugly)
if (x2 <= x1) {
x2 = x1 + 1;
}
// Convert the point pair to slope/offset of the baseline (in image coords.)
*out_slope = static_cast<float>(y2 - y1) / (x2 - x1);
*out_offset = static_cast<int>(y1 - *out_slope * x1);
// Get the y-coord of the baseline at the left and right edges of the
// textline's bounding box.
int left, top, right, bottom;
if (!it->BoundingBox(RIL_TEXTLINE, &left, &top, &right, &bottom)) {
return false;<fim_suffix> // Shift the baseline down so it passes through the nearest bottom-corner
// of the textline's bounding box. This is the difference between the y
// at the lowest (max) edge of the box and the actual box bottom.
*out_offset += bottom - std::max(left_y, right_y);
// Switch back to bottom-up tesseract coordinates. Requires negation of
// the slope and height - offset for the offset.
*out_slope = -*out_slope;
*out_offset = rect_height_ - *out_offset;
return true;
}
/** Sets Dict::letter_is_okay_ function to point to the given function. */
void TessBaseAPI::SetDictFunc(DictFunc f) {
if (tesseract_ != nullptr) {
tesseract_->getDict().letter_is_okay_ = f;
}
}
/**<fim_middle> }
int left_y = IntCastRounded(*out_slope * left + *out_offset);
int right_y = IntCastRounded(*out_slope * right + *out_offset);<fim_end>
|
const std::unique_ptr<const PageIterator> it(AnalyseLayout());
if (it == nullptr) {
return false;
}
int x1, x2, y1, y2;
it->Baseline(RIL_TEXTLINE, &x1, &y1, &x2, &y2);
// Calculate offset and slope (NOTE: Kind of ugly)
if (x2 <= x1) {
x2 = x1 + 1;
}
// Convert the point pair to slope/offset of the baseline (in image coords.)
*out_slope = static_cast<float>(y2 - y1) / (x2 - x1);
*out_offset = static_cast<int>(y1 - *out_slope * x1);
// Get the y-coord of the baseline at the left and right edges of the
// textline's bounding box.
int left, top, right, bottom;
if (!it->BoundingBox(RIL_TEXTLINE, &left, &top, &right, &bottom)) {
return false;
|
}
int left_y = IntCastRounded(*out_slope * left + *out_offset);
int right_y = IntCastRounded(*out_slope * right + *out_offset);
|
// Shift the baseline down so it passes through the nearest bottom-corner
// of the textline's bounding box. This is the difference between the y
// at the lowest (max) edge of the box and the actual box bottom.
*out_offset += bottom - std::max(left_y, right_y);
// Switch back to bottom-up tesseract coordinates. Requires negation of
// the slope and height - offset for the offset.
*out_slope = -*out_slope;
*out_offset = rect_height_ - *out_offset;
return true;
}
/** Sets Dict::letter_is_okay_ function to point to the given function. */
void TessBaseAPI::SetDictFunc(DictFunc f) {
if (tesseract_ != nullptr) {
tesseract_->getDict().letter_is_okay_ = f;
}
}
/**
|
random
|
<fim_prefix>K_EQ(ProjectSettings::get_singleton()->localize_path("..\\path\\filename"), "../path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("/testroot/filename"), "/testroot/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("/testroot/path/filename"), "/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("/testroot/path/something/../filename"), "/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("/testroot/path/./filename"), "/testroot/path/filename");
#ifdef WINDOWS_ENABLED
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("C:/testroot/filename"), "C:/testroot/filename");
CHECK_EQ(<fim_suffix>, "C:/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("C:/testroot/path/something/../filename"), "C:/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("C:/testroot/path/./filename"), "C:/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("C:\\testroot\\filename"), "C:/testroot/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("C:\\testroot\\path\\filename"), "C:/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("C:\\testroot\\path\\something\\..\\filename"), "C:/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->locali<fim_middle>ProjectSettings::get_singleton()->localize_path("C:/testroot/path/filename")<fim_end>
|
K_EQ(ProjectSettings::get_singleton()->localize_path("..\\path\\filename"), "../path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("/testroot/filename"), "/testroot/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("/testroot/path/filename"), "/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("/testroot/path/something/../filename"), "/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("/testroot/path/./filename"), "/testroot/path/filename");
#ifdef WINDOWS_ENABLED
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("C:/testroot/filename"), "C:/testroot/filename");
CHECK_EQ(
|
ProjectSettings::get_singleton()->localize_path("C:/testroot/path/filename")
|
, "C:/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("C:/testroot/path/something/../filename"), "C:/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("C:/testroot/path/./filename"), "C:/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("C:\\testroot\\filename"), "C:/testroot/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("C:\\testroot\\path\\filename"), "C:/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->localize_path("C:\\testroot\\path\\something\\..\\filename"), "C:/testroot/path/filename");
CHECK_EQ(ProjectSettings::get_singleton()->locali
|
ast_based
|
<fim_prefix> }
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect duplicated.
int i = 0;
for (const Pair<int, real_t> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t time = animation->track_get_key_time(E->get().first, E->get().second);
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, time, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();<fim_suffix> undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->add_do_method(this, "queue_redraw");
undo_redo->add_undo_method(this, "queue_redraw");
undo_redo->commit_action();
}
void AnimationBezierTrackEdit::copy_selected_keys(bool p_cut) {
if (selection.is_empty()) {
return;
}
float top_time = 1e10;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
float t = animation->track_get_key_time(E->get().first, E->get().second);
if (t < top_time) {
top_time = t;
}
}
RBMap<AnimationTrackEditor::SelectedKey, AnimationTrackEditor::KeyInfo> keys;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {<fim_middle> if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");<fim_end>
|
}
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect duplicated.
int i = 0;
for (const Pair<int, real_t> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t time = animation->track_get_key_time(E->get().first, E->get().second);
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, time, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
|
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
|
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->add_do_method(this, "queue_redraw");
undo_redo->add_undo_method(this, "queue_redraw");
undo_redo->commit_action();
}
void AnimationBezierTrackEdit::copy_selected_keys(bool p_cut) {
if (selection.is_empty()) {
return;
}
float top_time = 1e10;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
float t = animation->track_get_key_time(E->get().first, E->get().second);
if (t < top_time) {
top_time = t;
}
}
RBMap<AnimationTrackEditor::SelectedKey, AnimationTrackEditor::KeyInfo> keys;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
|
random
|
<fim_prefix>
* not be included in this list. To find the languages actually
* loaded use GetLoadedLanguagesAsVector.
* The returned string should NOT be deleted.
*/
const char *TessBaseAPI::GetInitLanguagesAsString() const {
return language_.c_str();
}
/**
* Returns the loaded languages in the vector of std::string.
* Includes all languages loaded by the last Init, including those loaded
* as dependencies of other loaded languages.
*/
void TessBaseAPI::GetLoadedLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
langs->push_back(tesseract_->lang);
int num_subs = tesseract_->num_sub_langs();
for (int i = 0; i < num_subs; ++i) <fim_suffix>
}
}
/**
* Returns the available languages in the sorted vector of std::string.
*/
void TessBaseAPI::GetAvailableLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
addAvailableLanguages(tesseract_->datadir, langs);
std::sort(langs->begin(), langs->end());
}
}
/**
* Init only for page layout analysis. Use only for calls to SetImage and
* AnalysePage. Calls that attempt recognition will generate an error.
*/
void TessBaseAPI::InitForAnalysePage() {
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
#ifndef DISABLED_LEGACY_ENGINE
tesseract_->InitAdaptiveClassifier(nullptr);
#endif
}
}
/**
* Read a "c<fim_middle>{
langs->push_back(tesseract_->get_sub_lang(i)->lang);
}<fim_end>
|
* not be included in this list. To find the languages actually
* loaded use GetLoadedLanguagesAsVector.
* The returned string should NOT be deleted.
*/
const char *TessBaseAPI::GetInitLanguagesAsString() const {
return language_.c_str();
}
/**
* Returns the loaded languages in the vector of std::string.
* Includes all languages loaded by the last Init, including those loaded
* as dependencies of other loaded languages.
*/
void TessBaseAPI::GetLoadedLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
langs->push_back(tesseract_->lang);
int num_subs = tesseract_->num_sub_langs();
for (int i = 0; i < num_subs; ++i)
|
{
langs->push_back(tesseract_->get_sub_lang(i)->lang);
}
|
}
}
/**
* Returns the available languages in the sorted vector of std::string.
*/
void TessBaseAPI::GetAvailableLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
addAvailableLanguages(tesseract_->datadir, langs);
std::sort(langs->begin(), langs->end());
}
}
/**
* Init only for page layout analysis. Use only for calls to SetImage and
* AnalysePage. Calls that attempt recognition will generate an error.
*/
void TessBaseAPI::InitForAnalysePage() {
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
#ifndef DISABLED_LEGACY_ENGINE
tesseract_->InitAdaptiveClassifier(nullptr);
#endif
}
}
/**
* Read a "c
|
ast_based
|
<fim_prefix>ompute the number of ASTC blocks in each dimension.
unsigned int block_count_x = (src_mip_w + block_x - 1) / block_x;
unsigned int block_count_y = (src_mip_h + block_y - 1) / block_y;
size_t comp_len = block_count_x * block_count_y * 16;
const astcenc_swizzle swizzle = {
ASTCENC_SWZ_R, ASTCENC_SWZ_G, ASTCENC_SWZ_B, ASTCENC_SWZ_A
};
status = astcenc_compress_image(context, &image, &swizzle, dest_mip_write, comp_len, 0);
ERR_BREAK_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: ASTC image compression failed: %s.", astcenc_get_error_string(status)));
astcenc_compress_reset(context);
}
astcenc_context_free(context);
// Replace original image with compressed one.
<fim_suffix>;
print_verbose(vformat("astcenc: Encoding took %d ms.", OS::get_singleton()->get_ticks_msec() - start_time));
}
#endif // TOOLS_ENABLED
void _decompress_astc(Image *r_img) {
const uint64_t start_time = OS::get_singleton()->get_ticks_msec();
// Determine decompression parameters from image format.
const Image::Format src_format = r_img->get_format();
bool is_hdr = false;
unsigned int block_x = 0;
unsigned int block_y = 0;
switch (src_format) {
case Image::FORMAT_ASTC_4x4: {
block_x = 4;
block_y = 4;
is_hdr = false;
} break;
case Image::FORMAT_ASTC_4x4_HDR: {
block_x = 4;
block_y = 4;
is_hdr = true;
} break;
case Image::FORMAT_ASTC_8x8: {
block_x = <fim_middle>r_img->set_data(width, height, has_mipmaps, target_format, dest_data)<fim_end>
|
ompute the number of ASTC blocks in each dimension.
unsigned int block_count_x = (src_mip_w + block_x - 1) / block_x;
unsigned int block_count_y = (src_mip_h + block_y - 1) / block_y;
size_t comp_len = block_count_x * block_count_y * 16;
const astcenc_swizzle swizzle = {
ASTCENC_SWZ_R, ASTCENC_SWZ_G, ASTCENC_SWZ_B, ASTCENC_SWZ_A
};
status = astcenc_compress_image(context, &image, &swizzle, dest_mip_write, comp_len, 0);
ERR_BREAK_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: ASTC image compression failed: %s.", astcenc_get_error_string(status)));
astcenc_compress_reset(context);
}
astcenc_context_free(context);
// Replace original image with compressed one.
|
r_img->set_data(width, height, has_mipmaps, target_format, dest_data)
|
;
print_verbose(vformat("astcenc: Encoding took %d ms.", OS::get_singleton()->get_ticks_msec() - start_time));
}
#endif // TOOLS_ENABLED
void _decompress_astc(Image *r_img) {
const uint64_t start_time = OS::get_singleton()->get_ticks_msec();
// Determine decompression parameters from image format.
const Image::Format src_format = r_img->get_format();
bool is_hdr = false;
unsigned int block_x = 0;
unsigned int block_y = 0;
switch (src_format) {
case Image::FORMAT_ASTC_4x4: {
block_x = 4;
block_y = 4;
is_hdr = false;
} break;
case Image::FORMAT_ASTC_4x4_HDR: {
block_x = 4;
block_y = 4;
is_hdr = true;
} break;
case Image::FORMAT_ASTC_8x8: {
block_x =
|
ast_based
|
<fim_prefix> const Color focus_color = get_theme_color(SNAME("focus_color"), SNAME("AnimationBezierTrackEdit"));
const Color track_focus_color = get_theme_color(SNAME("track_focus_color"), SNAME("AnimationBezierTrackEdit"));
const int h_separation = get_theme_constant(SNAME("h_separation"), SNAME("AnimationBezierTrackEdit"));
const int v_separation = get_theme_constant(SNAME("h_separation"), SNAME("AnimationBezierTrackEdit"));
if (has_focus()) {
draw_rect(Rect2(Point2(), get_size()), focus_color, false, Math::round(EDSCALE));
}
draw_line(Point2(limit, 0), Point2(limit, get_size().height), v_line_color, Math::round(EDSCALE));<fim_suffix>
track_v_scroll_max = v_separation;
int vofs = v_separation + track_v_scroll;
int margin = 0;
RBMap<int, Color> subtrack_colors;
Color selected_track_color;
subtracks.clear();
subtrack_icons.clear();
RBMap<String, Vector<int>> track_indices;
int track_count = animation->get_track_count();
for (int i = 0; i < track_count; ++i) {
if (!_is_track_displayed(i)) {
continue;
}
String base_path = String(animation->track_get_path(i));
int end = base_path.find_char(':');
if (end != -1) {
base_path = base_path.substr(0, end + 1);
}
Vector<int> indices = track_indices.has(base_path) ? track_indices[base_path] : Vector<int>();<fim_middle>
int right_limit = get_size().width;<fim_end>
|
const Color focus_color = get_theme_color(SNAME("focus_color"), SNAME("AnimationBezierTrackEdit"));
const Color track_focus_color = get_theme_color(SNAME("track_focus_color"), SNAME("AnimationBezierTrackEdit"));
const int h_separation = get_theme_constant(SNAME("h_separation"), SNAME("AnimationBezierTrackEdit"));
const int v_separation = get_theme_constant(SNAME("h_separation"), SNAME("AnimationBezierTrackEdit"));
if (has_focus()) {
draw_rect(Rect2(Point2(), get_size()), focus_color, false, Math::round(EDSCALE));
}
draw_line(Point2(limit, 0), Point2(limit, get_size().height), v_line_color, Math::round(EDSCALE));
|
int right_limit = get_size().width;
|
track_v_scroll_max = v_separation;
int vofs = v_separation + track_v_scroll;
int margin = 0;
RBMap<int, Color> subtrack_colors;
Color selected_track_color;
subtracks.clear();
subtrack_icons.clear();
RBMap<String, Vector<int>> track_indices;
int track_count = animation->get_track_count();
for (int i = 0; i < track_count; ++i) {
if (!_is_track_displayed(i)) {
continue;
}
String base_path = String(animation->track_get_path(i));
int end = base_path.find_char(':');
if (end != -1) {
base_path = base_path.substr(0, end + 1);
}
Vector<int> indices = track_indices.has(base_path) ? track_indices[base_path] : Vector<int>();
|
random
|
<fim_prefix>h; // number of threads to use for batch processing
enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id
enum llama_attention_type attention_type; // attention type to use for embeddings
enum llama_flash_attn_type flash_attn_type; // when to enable Flash Attention
// ref: https://github.com/ggml-org/llama.cpp/pull/2054
float rope_freq_base; // RoPE base frequency, 0 = from model
float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model
<fim_suffix> // YaRN extrapolation mix factor, negative = from model
float yarn_attn_factor; // YaRN magnitude scaling factor
float yarn_beta_fast; // YaRN low correction dim
float yarn_beta_slow; // YaRN high correction dim
uint32_t yarn_orig_ctx; // YaRN original context size
float defrag_thold; // [DEPRECATED] defragment the KV cache if holes/size > thold, <= 0 disabled (default)
ggml_backend_sched_eval_callback cb_eval;
void * cb_eval_user_data;
enum ggml_type type_k; // data type for K cache [EXPERIMENTAL]
enum ggml_type type_v; // data type for V cache [EXPERIMENTAL]
// Abort callback
<fim_middle>float yarn_ext_factor;<fim_end>
|
h; // number of threads to use for batch processing
enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id
enum llama_attention_type attention_type; // attention type to use for embeddings
enum llama_flash_attn_type flash_attn_type; // when to enable Flash Attention
// ref: https://github.com/ggml-org/llama.cpp/pull/2054
float rope_freq_base; // RoPE base frequency, 0 = from model
float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model
|
float yarn_ext_factor;
|
// YaRN extrapolation mix factor, negative = from model
float yarn_attn_factor; // YaRN magnitude scaling factor
float yarn_beta_fast; // YaRN low correction dim
float yarn_beta_slow; // YaRN high correction dim
uint32_t yarn_orig_ctx; // YaRN original context size
float defrag_thold; // [DEPRECATED] defragment the KV cache if holes/size > thold, <= 0 disabled (default)
ggml_backend_sched_eval_callback cb_eval;
void * cb_eval_user_data;
enum ggml_type type_k; // data type for K cache [EXPERIMENTAL]
enum ggml_type type_v; // data type for V cache [EXPERIMENTAL]
// Abort callback
|
ast_based
|
<fim_prefix> fprintf(stderr, "\n");
}
static bool params_parse(int argc, char ** argv, struct train_params * params) {
bool invalid_param = false;
bool reqd_param_found = false;
std::string arg;
struct train_params default_params = get_default_train_params();
const std::string arg_prefix = "--";
for (int i = 1; i < argc; i++) {
arg = argv[i];
if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
std::replace(arg.begin(), arg.end(), '_', '-');
}
if (arg == "--copy-vocab-from-model") {
if (++i >= argc) {
invalid_param = true;
break;
}<fim_suffix> invalid_param = true;
break;
}
reqd_param_found = true;
params->fn_llama2c_model = argv[i];
} else if (arg == "--llama2c-output-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
params->fn_llama2c_output_model = argv[i];
} else if (arg == "-h" || arg == "--help") {
print_usage(argc, argv, &default_params);
exit(0);
} else {
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
print_usage(argc, argv, &default_params);
exit(1);
}
}<fim_middle> params->fn_vocab_model = argv[i];
} else if (arg == "--llama2c-model") {
if (++i >= argc) {<fim_end>
|
fprintf(stderr, "\n");
}
static bool params_parse(int argc, char ** argv, struct train_params * params) {
bool invalid_param = false;
bool reqd_param_found = false;
std::string arg;
struct train_params default_params = get_default_train_params();
const std::string arg_prefix = "--";
for (int i = 1; i < argc; i++) {
arg = argv[i];
if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
std::replace(arg.begin(), arg.end(), '_', '-');
}
if (arg == "--copy-vocab-from-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
|
params->fn_vocab_model = argv[i];
} else if (arg == "--llama2c-model") {
if (++i >= argc) {
|
invalid_param = true;
break;
}
reqd_param_found = true;
params->fn_llama2c_model = argv[i];
} else if (arg == "--llama2c-output-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
params->fn_llama2c_output_model = argv[i];
} else if (arg == "-h" || arg == "--help") {
print_usage(argc, argv, &default_params);
exit(0);
} else {
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
print_usage(argc, argv, &default_params);
exit(1);
}
}
|
random
|
<fim_prefix>();
ERR_FAIL_COND(ae.is_null());
//TODO
DisplayServer::get_singleton()->accessibility_update_set_role(ae, DisplayServer::AccessibilityRole::ROLE_STATIC_TEXT);
DisplayServer::get_singleton()->accessibility_update_set_value(ae, TTR(vformat("The %s is not accessible at this time.", "Animation bezier track editor")));
} break;
case NOTIFICATION_DRAW: {
if (animation.is_null()) {
return;
}
int limit = timeline->get_name_limit();
const Ref<Font> font = get_theme_font(SceneStringName(font), SNAME("Label"));
const int font_size = get_theme_font_size(SceneStringName(font_size), SNAME("Label"));
const Color color = get_theme_color(SceneStringName(font_color), <fim_suffix>);
const Color h_line_color = get_theme_color(SNAME("h_line_color"), SNAME("AnimationBezierTrackEdit"));
const Color v_line_color = get_theme_color(SNAME("v_line_color"), SNAME("AnimationBezierTrackEdit"));
const Color focus_color = get_theme_color(SNAME("focus_color"), SNAME("AnimationBezierTrackEdit"));
const Color track_focus_color = get_theme_color(SNAME("track_focus_color"), SNAME("AnimationBezierTrackEdit"));
const int h_separation = get_theme_constant(SNAME("h_separation"), SNAME("AnimationBezierTrackEdit"));
const int v_separation = get_theme_constant(SNAME("h_separation"), SNAME("AnimationBezierTrackEdit"));
if (has_focus()) {
draw_rect(Rect2(Point2(), ge<fim_middle>SNAME("Label")<fim_end>
|
();
ERR_FAIL_COND(ae.is_null());
//TODO
DisplayServer::get_singleton()->accessibility_update_set_role(ae, DisplayServer::AccessibilityRole::ROLE_STATIC_TEXT);
DisplayServer::get_singleton()->accessibility_update_set_value(ae, TTR(vformat("The %s is not accessible at this time.", "Animation bezier track editor")));
} break;
case NOTIFICATION_DRAW: {
if (animation.is_null()) {
return;
}
int limit = timeline->get_name_limit();
const Ref<Font> font = get_theme_font(SceneStringName(font), SNAME("Label"));
const int font_size = get_theme_font_size(SceneStringName(font_size), SNAME("Label"));
const Color color = get_theme_color(SceneStringName(font_color),
|
SNAME("Label")
|
);
const Color h_line_color = get_theme_color(SNAME("h_line_color"), SNAME("AnimationBezierTrackEdit"));
const Color v_line_color = get_theme_color(SNAME("v_line_color"), SNAME("AnimationBezierTrackEdit"));
const Color focus_color = get_theme_color(SNAME("focus_color"), SNAME("AnimationBezierTrackEdit"));
const Color track_focus_color = get_theme_color(SNAME("track_focus_color"), SNAME("AnimationBezierTrackEdit"));
const int h_separation = get_theme_constant(SNAME("h_separation"), SNAME("AnimationBezierTrackEdit"));
const int v_separation = get_theme_constant(SNAME("h_separation"), SNAME("AnimationBezierTrackEdit"));
if (has_focus()) {
draw_rect(Rect2(Point2(), ge
|
ast_based
|
<fim_prefix>ma_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);
// Get the number of metadata key/value pairs
LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model);
// Get metadata key name by index
LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
// Get metadata value as a string by index
LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
// Get a string describing the model type
LLAMA_API int32_t llama_model_desc(const struct llama_model * model, <fim_suffix>, size_t buf_size);
// Returns the total size of all the tensors in the model in bytes
LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
// Get the default chat template. Returns nullptr if not available
// If name is NULL, returns the default chat template
LLAMA_API const char * llama_model_chat_template(const struct llama_model * model, const char * name);
// Returns the total number of parameters in the model
LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
// Returns true if the model contains an encoder that requires llama_encode() call
LLAMA_API bool llama_model_has_encoder(const struct llama_model *<fim_middle>char * buf<fim_end>
|
ma_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);
// Get the number of metadata key/value pairs
LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model);
// Get metadata key name by index
LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
// Get metadata value as a string by index
LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
// Get a string describing the model type
LLAMA_API int32_t llama_model_desc(const struct llama_model * model,
|
char * buf
|
, size_t buf_size);
// Returns the total size of all the tensors in the model in bytes
LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
// Get the default chat template. Returns nullptr if not available
// If name is NULL, returns the default chat template
LLAMA_API const char * llama_model_chat_template(const struct llama_model * model, const char * name);
// Returns the total number of parameters in the model
LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
// Returns true if the model contains an encoder that requires llama_encode() call
LLAMA_API bool llama_model_has_encoder(const struct llama_model *
|
ast_based
|
<fim_prefix>.get(), kid);
adapter.alora_invocation_tokens.resize(seq_len);
std::copy(
(const llama_token *)data,
(const llama_token *)data + seq_len,
adapter.alora_invocation_tokens.begin());
}
}
int n_tensors = gguf_get_n_tensors(ctx_gguf.get());
// contexts for each buffer type
std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
auto it = ctx_map.find(buft);
if (it == ctx_map.end()) {
// add a new context
ggml_init_params params = {
/*.mem_size =*/ n_tensors*<fim_suffix>,
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
};
ggml_context * buft_ctx = ggml_init(params);
if (!buft_ctx) {
return nullptr;
}
ctx_map[buft] = buft_ctx;
adapter.ctxs.emplace_back(buft_ctx);
return buft_ctx;
};
return it->second;
};
// bundle lora_a and lora_b into pairs
std::map<std::string, llama_adapter_lora_weight> ab_map;
auto str_endswith = [](const std::string & str, const std::string & suffix) {
return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
};
<fim_middle>ggml_tensor_overhead()<fim_end>
|
.get(), kid);
adapter.alora_invocation_tokens.resize(seq_len);
std::copy(
(const llama_token *)data,
(const llama_token *)data + seq_len,
adapter.alora_invocation_tokens.begin());
}
}
int n_tensors = gguf_get_n_tensors(ctx_gguf.get());
// contexts for each buffer type
std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
auto it = ctx_map.find(buft);
if (it == ctx_map.end()) {
// add a new context
ggml_init_params params = {
/*.mem_size =*/ n_tensors*
|
ggml_tensor_overhead()
|
,
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
};
ggml_context * buft_ctx = ggml_init(params);
if (!buft_ctx) {
return nullptr;
}
ctx_map[buft] = buft_ctx;
adapter.ctxs.emplace_back(buft_ctx);
return buft_ctx;
};
return it->second;
};
// bundle lora_a and lora_b into pairs
std::map<std::string, llama_adapter_lora_weight> ab_map;
auto str_endswith = [](const std::string & str, const std::string & suffix) {
return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
};
|
ast_based
|
<fim_prefix>ect_key", callable_mp(editor, &AnimationTrackEditor::_key_deselected), CONNECT_DEFERRED);
}
void AnimationBezierTrackEdit::_play_position_draw() {
if (animation.is_null() || play_position_pos < 0) {
return;
}
float scale = timeline->get_zoom_scale();
int h = get_size().height;
int limit = timeline->get_name_limit();
int px = (-timeline->get_value() + play_position_pos) * scale + limit;
if (px >= limit && px < (get_size().width)) {
const Color color = get_theme_color(SNAME("accent_color"), EditorStringName(Editor));
play_position->draw_line(Point2(px, 0), Point2(px, h), color, Math::round(2 * EDSCALE));
}
}
void AnimationBezierTrackEdit::set_play_position(real_t p_pos) {
<fim_suffix>
play_position->queue_redraw();
}
void AnimationBezierTrackEdit::update_play_position() {
play_position->queue_redraw();
}
void AnimationBezierTrackEdit::set_root(Node *p_root) {
root = p_root;
}
void AnimationBezierTrackEdit::set_filtered(bool p_filtered) {
is_filtered = p_filtered;
if (animation.is_null()) {
return;
}
String base_path = String(animation->track_get_path(selected_track));
if (is_filtered) {
if (root && root->has_node(base_path)) {
Node *node = root->get_node(base_path);
if (!node || !EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (animation->track_get_type(i) <fim_middle>play_position_pos = p_pos;<fim_end>
|
ect_key", callable_mp(editor, &AnimationTrackEditor::_key_deselected), CONNECT_DEFERRED);
}
void AnimationBezierTrackEdit::_play_position_draw() {
if (animation.is_null() || play_position_pos < 0) {
return;
}
float scale = timeline->get_zoom_scale();
int h = get_size().height;
int limit = timeline->get_name_limit();
int px = (-timeline->get_value() + play_position_pos) * scale + limit;
if (px >= limit && px < (get_size().width)) {
const Color color = get_theme_color(SNAME("accent_color"), EditorStringName(Editor));
play_position->draw_line(Point2(px, 0), Point2(px, h), color, Math::round(2 * EDSCALE));
}
}
void AnimationBezierTrackEdit::set_play_position(real_t p_pos) {
|
play_position_pos = p_pos;
|
play_position->queue_redraw();
}
void AnimationBezierTrackEdit::update_play_position() {
play_position->queue_redraw();
}
void AnimationBezierTrackEdit::set_root(Node *p_root) {
root = p_root;
}
void AnimationBezierTrackEdit::set_filtered(bool p_filtered) {
is_filtered = p_filtered;
if (animation.is_null()) {
return;
}
String base_path = String(animation->track_get_path(selected_track));
if (is_filtered) {
if (root && root->has_node(base_path)) {
Node *node = root->get_node(base_path);
if (!node || !EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (animation->track_get_type(i)
|
ast_based
|
<fim_prefix> static char32_t prev_wc = 0;
char32_t unicode = p_unicode;
if ((p_unicode & 0xfffffc00) == 0xd800) {
if (prev_wc != 0) {
ERR_PRINT("invalid utf16 surrogate input");
}
prev_wc = unicode;
return; // Skip surrogate.
} else if ((unicode & 0xfffffc00) == 0xdc00) {
if (prev_wc == 0) {
ERR_PRINT("invalid utf16 surrogate input");
return; // Skip invalid surrogate.
}
unicode = (prev_wc << 10UL) + unicode - ((0xd800 << 10UL) + 0xdc00 - 0x10000);
prev_wc = 0;
} else {
prev_wc = 0;
}
Ref<InputEventKey> ev;
ev.instantiate();
Key physical_keycode = godot_code_from_android_code(p_physical_keycode);<fim_suffix> if (unicode == '\b') { // 0x08
keycode = Key::BACKSPACE;
} else if (unicode == '\t') { // 0x09
keycode = Key::TAB;
} else if (unicode == '\n') { // 0x0A
keycode = Key::ENTER;
} else if (unicode == 0x1B) {
keycode = Key::ESCAPE;
} else if (unicode == 0x1F) {
keycode = Key::KEY_DELETE;
} else {
keycode = fix_keycode(unicode, physical_keycode);
}
switch (physical_keycode) {
case Key::SHIFT: {
shift_mem = p_pressed;
} break;
case Key::ALT: {
alt_mem = p_pressed;
} break;
case Key::CTRL: {
control_mem = p_pressed;
} break;
case Key::META: {
meta_mem = p_pressed;
} break;
default:
break;
}
ev->set_keycode(keycode);<fim_middle> Key keycode;<fim_end>
|
static char32_t prev_wc = 0;
char32_t unicode = p_unicode;
if ((p_unicode & 0xfffffc00) == 0xd800) {
if (prev_wc != 0) {
ERR_PRINT("invalid utf16 surrogate input");
}
prev_wc = unicode;
return; // Skip surrogate.
} else if ((unicode & 0xfffffc00) == 0xdc00) {
if (prev_wc == 0) {
ERR_PRINT("invalid utf16 surrogate input");
return; // Skip invalid surrogate.
}
unicode = (prev_wc << 10UL) + unicode - ((0xd800 << 10UL) + 0xdc00 - 0x10000);
prev_wc = 0;
} else {
prev_wc = 0;
}
Ref<InputEventKey> ev;
ev.instantiate();
Key physical_keycode = godot_code_from_android_code(p_physical_keycode);
|
Key keycode;
|
if (unicode == '\b') { // 0x08
keycode = Key::BACKSPACE;
} else if (unicode == '\t') { // 0x09
keycode = Key::TAB;
} else if (unicode == '\n') { // 0x0A
keycode = Key::ENTER;
} else if (unicode == 0x1B) {
keycode = Key::ESCAPE;
} else if (unicode == 0x1F) {
keycode = Key::KEY_DELETE;
} else {
keycode = fix_keycode(unicode, physical_keycode);
}
switch (physical_keycode) {
case Key::SHIFT: {
shift_mem = p_pressed;
} break;
case Key::ALT: {
alt_mem = p_pressed;
} break;
case Key::CTRL: {
control_mem = p_pressed;
} break;
case Key::META: {
meta_mem = p_pressed;
} break;
default:
break;
}
ev->set_keycode(keycode);
|
random
|
<fim_prefix>
// Initialize astcenc.
const int64_t dest_size = Image::get_image_data_size(width, height, target_format, has_mipmaps);
Vector<uint8_t> dest_data;
dest_data.resize(dest_size);
uint8_t *dest_write = dest_data.ptrw();
astcenc_config config;
config.block_x = block_x;
config.block_y = block_y;
config.profile = profile;
const float quality = ASTCENC_PRE_MEDIUM;
astcenc_error status = astcenc_config_init(profile, block_x, block_y, 1, quality, 0, &config);
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Configuration initialization failed: %s.", astcenc_get_error_string(status)));
// Context allocation.<fim_suffix> status = astcenc_context_alloc(&config, thread_count, &context);
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Context allocation failed: %s.", astcenc_get_error_string(status)));
const int mip_count = has_mipmaps ? Image::get_image_required_mipmaps(width, height, target_format) : 0;
const uint8_t *src_data = r_img->ptr();
for (int i = 0; i < mip_count + 1; i++) {
int src_mip_w, src_mip_h;
const int64_t src_ofs = Image::get_image_mipmap_offset_and_dimensions(width, height, r_img->get_format(), i, src_mip_w, src_mip_h);
const uint8_t *mip_data = &src_data[src_ofs];
const int64_t dst_ofs = Image::get_image_mipmap_offset(width, height, target_format, i);<fim_middle> astcenc_context *context;
	const unsigned int thread_count = 1; // Godot compresses multiple images, each on its own thread, which is more efficient for a large number of imported images.<fim_end>
|
// Initialize astcenc.
const int64_t dest_size = Image::get_image_data_size(width, height, target_format, has_mipmaps);
Vector<uint8_t> dest_data;
dest_data.resize(dest_size);
uint8_t *dest_write = dest_data.ptrw();
astcenc_config config;
config.block_x = block_x;
config.block_y = block_y;
config.profile = profile;
const float quality = ASTCENC_PRE_MEDIUM;
astcenc_error status = astcenc_config_init(profile, block_x, block_y, 1, quality, 0, &config);
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Configuration initialization failed: %s.", astcenc_get_error_string(status)));
// Context allocation.
|
astcenc_context *context;
	const unsigned int thread_count = 1; // Godot compresses multiple images, each on its own thread, which is more efficient for a large number of imported images.
|
status = astcenc_context_alloc(&config, thread_count, &context);
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Context allocation failed: %s.", astcenc_get_error_string(status)));
const int mip_count = has_mipmaps ? Image::get_image_required_mipmaps(width, height, target_format) : 0;
const uint8_t *src_data = r_img->ptr();
for (int i = 0; i < mip_count + 1; i++) {
int src_mip_w, src_mip_h;
const int64_t src_ofs = Image::get_image_mipmap_offset_and_dimensions(width, height, r_img->get_format(), i, src_mip_w, src_mip_h);
const uint8_t *mip_data = &src_data[src_ofs];
const int64_t dst_ofs = Image::get_image_mipmap_offset(width, height, target_format, i);
|
random
|
<fim_prefix>/ number of generated tokens
int32_t n_reused; // number of times a ggml compute graph had been reused
};
struct llama_perf_sampler_data {
double t_sample_ms; // time needed for sampling in ms
int32_t n_sample; // number of sampled tokens
};
LLAMA_API struct llama_perf_context_data llama_perf_context (const struct llama_context * ctx);
LLAMA_API void llama_perf_context_print(const struct llama_context * ctx);
LLAMA_API void llama_perf_context_reset( struct llama_context * ctx);
// NOTE: the following work only with samplers constructed via llama_sampler_chain_init
LLA<fim_suffix> LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain);
LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain);
// print a breakdown of per-device memory use via LLAMA_LOG:
LLAMA_API void llama_memory_breakdown_print(const struct llama_context * ctx);
//
// training
//
// function that returns whether or not a given tensor contains trainable parameters
typedef bool (*llama_opt_param_filter)(const struct ggml_tensor * tensor, void * userdata);
// always returns true
LLAMA_API bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userda<fim_middle>MA_API struct llama_perf_sampler_data llama_perf_sampler (const struct llama_sampler * chain);
<fim_end>
|
/ number of generated tokens
int32_t n_reused; // number of times a ggml compute graph had been reused
};
struct llama_perf_sampler_data {
double t_sample_ms; // time needed for sampling in ms
int32_t n_sample; // number of sampled tokens
};
LLAMA_API struct llama_perf_context_data llama_perf_context (const struct llama_context * ctx);
LLAMA_API void llama_perf_context_print(const struct llama_context * ctx);
LLAMA_API void llama_perf_context_reset( struct llama_context * ctx);
// NOTE: the following work only with samplers constructed via llama_sampler_chain_init
LLA
|
MA_API struct llama_perf_sampler_data llama_perf_sampler (const struct llama_sampler * chain);
|
LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain);
LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain);
// print a breakdown of per-device memory use via LLAMA_LOG:
LLAMA_API void llama_memory_breakdown_print(const struct llama_context * ctx);
//
// training
//
// function that returns whether or not a given tensor contains trainable parameters
typedef bool (*llama_opt_param_filter)(const struct ggml_tensor * tensor, void * userdata);
// always returns true
LLAMA_API bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userda
|
ast_based
|
<fim_prefix>()) : value;
replace_all(print_value, "\n", "\\n");
LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), print_value.c_str());
}
auto get_kv_str = [&](const std::string & key) -> std::string {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? "" : std::string(gguf_get_val_str(gguf_ctx, id));
};
auto get_kv_f32 = [&](const std::string & key) -> float {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? 0.0f : gguf_get_val_f32(gguf_ctx, id);
};
LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
auto general_type = <fim_suffix>;
if (general_type != "adapter") {
throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
}
auto general_arch_str = get_kv_str(llm_kv(LLM_KV_GENERAL_ARCHITECTURE));
auto general_arch = llm_arch_from_string(general_arch_str);
if (general_arch != model.arch) {
throw std::runtime_error("model arch and LoRA arch mismatch");
}
auto adapter_type = get_kv_str(llm_kv(LLM_KV_ADAPTER_TYPE));
if (adapter_type != "lora") {
throw std::runtime_error("expect adapter.type to be 'lora', but got: " + adapter_type);
}
adapter.alpha = get_kv_f32(llm_kv(LLM_KV<fim_middle>get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE))<fim_end>
|
()) : value;
replace_all(print_value, "\n", "\\n");
LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), print_value.c_str());
}
auto get_kv_str = [&](const std::string & key) -> std::string {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? "" : std::string(gguf_get_val_str(gguf_ctx, id));
};
auto get_kv_f32 = [&](const std::string & key) -> float {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? 0.0f : gguf_get_val_f32(gguf_ctx, id);
};
LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
auto general_type =
|
get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE))
|
;
if (general_type != "adapter") {
throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
}
auto general_arch_str = get_kv_str(llm_kv(LLM_KV_GENERAL_ARCHITECTURE));
auto general_arch = llm_arch_from_string(general_arch_str);
if (general_arch != model.arch) {
throw std::runtime_error("model arch and LoRA arch mismatch");
}
auto adapter_type = get_kv_str(llm_kv(LLM_KV_ADAPTER_TYPE));
if (adapter_type != "lora") {
throw std::runtime_error("expect adapter.type to be 'lora', but got: " + adapter_type);
}
adapter.alpha = get_kv_f32(llm_kv(LLM_KV
|
ast_based
|
<fim_prefix> fprintf(stderr, "\n");
}
static bool params_parse(int argc, char ** argv, struct train_params * params) {
bool invalid_param = false;
bool reqd_param_found = false;
std::string arg;
struct train_params default_params = get_default_train_params();
const std::string arg_prefix = "--";
for (int i = 1; i < argc; i++) {
arg = argv[i];
if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
std::replace(arg.begin(), arg.end(), '_', '-');
}
if (arg == "--copy-vocab-from-model") {<fim_suffix> params->fn_vocab_model = argv[i];
} else if (arg == "--llama2c-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
reqd_param_found = true;
params->fn_llama2c_model = argv[i];
} else if (arg == "--llama2c-output-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
params->fn_llama2c_output_model = argv[i];
} else if (arg == "-h" || arg == "--help") {
print_usage(argc, argv, &default_params);
exit(0);
} else {<fim_middle> if (++i >= argc) {
invalid_param = true;
break;
}<fim_end>
|
fprintf(stderr, "\n");
}
static bool params_parse(int argc, char ** argv, struct train_params * params) {
bool invalid_param = false;
bool reqd_param_found = false;
std::string arg;
struct train_params default_params = get_default_train_params();
const std::string arg_prefix = "--";
for (int i = 1; i < argc; i++) {
arg = argv[i];
if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
std::replace(arg.begin(), arg.end(), '_', '-');
}
if (arg == "--copy-vocab-from-model") {
|
if (++i >= argc) {
invalid_param = true;
break;
}
|
params->fn_vocab_model = argv[i];
} else if (arg == "--llama2c-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
reqd_param_found = true;
params->fn_llama2c_model = argv[i];
} else if (arg == "--llama2c-output-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
params->fn_llama2c_output_model = argv[i];
} else if (arg == "-h" || arg == "--help") {
print_usage(argc, argv, &default_params);
exit(0);
} else {
|
random
|
<fim_prefix>/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */<fim_suffix>
#pragma once
#ifdef ACCESSKIT_ENABLED
#include "core/templates/rid_owner.h"
#include "servers/display_server.h"
#ifdef ACCESSKIT_DYNAMIC
#ifdef LINUXBSD_ENABLED
#include "drivers/accesskit/dynwrappers/accesskit-so_wrap.h"
#endif
#ifdef MACOS_ENABLED
#include "drivers/accesskit/dynwrappers/accesskit-dylib_wrap.h"
#endif
#ifdef WINDOWS_ENABLED
#include "drivers/accesskit/dynwrappers/accesskit-dll_wrap.h"
#endif
#else
#include <accesskit.h>
#endif
class AccessibilityDriverAccessKit : public AccessibilityDriver {
static AccessibilityDriverAccessKit *singleton;
struct AccessibilityElement {
HashMap<accesskit_action, Callable> actions;
<fim_middle>/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/<fim_end>
|
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
|
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
|
#pragma once
#ifdef ACCESSKIT_ENABLED
#include "core/templates/rid_owner.h"
#include "servers/display_server.h"
#ifdef ACCESSKIT_DYNAMIC
#ifdef LINUXBSD_ENABLED
#include "drivers/accesskit/dynwrappers/accesskit-so_wrap.h"
#endif
#ifdef MACOS_ENABLED
#include "drivers/accesskit/dynwrappers/accesskit-dylib_wrap.h"
#endif
#ifdef WINDOWS_ENABLED
#include "drivers/accesskit/dynwrappers/accesskit-dll_wrap.h"
#endif
#else
#include <accesskit.h>
#endif
class AccessibilityDriverAccessKit : public AccessibilityDriver {
static AccessibilityDriverAccessKit *singleton;
struct AccessibilityElement {
HashMap<accesskit_action, Callable> actions;
|
random
|
<fim_prefix>CCESSKIT_ROLE_TEXT_RUN && p_request->action == ACCESSKIT_ACTION_SCROLL_INTO_VIEW) {
AccessibilityElement *root_ae = singleton->rid_owner.get_or_null(ae->parent);
ERR_FAIL_NULL(root_ae);
ae = root_ae;
rq_data = ae->run;
}
if (ae->actions.has(p_request->action)) {
Callable &cb = ae->actions[p_request->action];
if (cb.is_valid()) {
if (p_request->data.has_value) {
switch (p_request->data.value.tag) {
case ACCESSKIT_ACTION_DATA_CUSTOM_ACTION: {
rq_data = p_request->data.value.custom_action;
} break;
case ACCESSKIT_ACTION_DATA_VALUE: {
rq_data = String::utf8(p_request->data.value.value);
} break;
case ACCESSKIT_ACTION_DATA_NUMERIC_VALUE: <fim_suffix> break;
case ACCESSKIT_ACTION_DATA_SCROLL_HINT: {
switch (p_request->data.value.scroll_hint) {
case ACCESSKIT_SCROLL_HINT_TOP_LEFT: {
rq_data = DisplayServer::SCROLL_HINT_TOP_LEFT;
} break;
case ACCESSKIT_SCROLL_HINT_BOTTOM_RIGHT: {
rq_data = DisplayServer::SCROLL_HINT_BOTTOM_RIGHT;
} break;
case ACCESSKIT_SCROLL_HINT_TOP_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_TOP_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_BOTTOM_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_BOTTOM_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_LEFT_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_LEFT_EDGE;
} <fim_middle>{
rq_data = p_request->data.value.numeric_value;
}<fim_end>
|
CCESSKIT_ROLE_TEXT_RUN && p_request->action == ACCESSKIT_ACTION_SCROLL_INTO_VIEW) {
AccessibilityElement *root_ae = singleton->rid_owner.get_or_null(ae->parent);
ERR_FAIL_NULL(root_ae);
ae = root_ae;
rq_data = ae->run;
}
if (ae->actions.has(p_request->action)) {
Callable &cb = ae->actions[p_request->action];
if (cb.is_valid()) {
if (p_request->data.has_value) {
switch (p_request->data.value.tag) {
case ACCESSKIT_ACTION_DATA_CUSTOM_ACTION: {
rq_data = p_request->data.value.custom_action;
} break;
case ACCESSKIT_ACTION_DATA_VALUE: {
rq_data = String::utf8(p_request->data.value.value);
} break;
case ACCESSKIT_ACTION_DATA_NUMERIC_VALUE:
|
{
rq_data = p_request->data.value.numeric_value;
}
|
break;
case ACCESSKIT_ACTION_DATA_SCROLL_HINT: {
switch (p_request->data.value.scroll_hint) {
case ACCESSKIT_SCROLL_HINT_TOP_LEFT: {
rq_data = DisplayServer::SCROLL_HINT_TOP_LEFT;
} break;
case ACCESSKIT_SCROLL_HINT_BOTTOM_RIGHT: {
rq_data = DisplayServer::SCROLL_HINT_BOTTOM_RIGHT;
} break;
case ACCESSKIT_SCROLL_HINT_TOP_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_TOP_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_BOTTOM_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_BOTTOM_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_LEFT_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_LEFT_EDGE;
}
|
ast_based
|
<fim_prefix>scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
}
}
key_order[ofs] = i;
}
for (RBMap<real_t, int>::Element *E = key_order.front(); E; E = E->next()) {
int i = E->get();
if (!E->next()) {
break;
}
int i_n = E->next()->get();
float offset = animation->track_get_key_time(p_track, i);
float height = animation->bezier_track_get_key_value(p_track, i);
Vector2 out_handle = animation->bezier_track_get_key_out_handle(p_track, i);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i) {
out_handle = moving_handle_right;
}
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
<fim_suffix>
} else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
height += -scaling_selection_offset.y + (height - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
float offset_n = animation->track_get_key_time(p_track, i_n);
float height_n = animation->bezier_track_get_key_value(p_track, i_n);
Vector2 in_handle = animation->bezier_track_get_key_in_handle(p_track, i_n);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i_n) {
in_handle = moving_handle_left;
}
if (selection.has(IntPair(p_track, i_n))) {
if <fim_middle>offset += moving_selection_offset.x;
height += moving_selection_offset.y;<fim_end>
|
scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
}
}
key_order[ofs] = i;
}
for (RBMap<real_t, int>::Element *E = key_order.front(); E; E = E->next()) {
int i = E->get();
if (!E->next()) {
break;
}
int i_n = E->next()->get();
float offset = animation->track_get_key_time(p_track, i);
float height = animation->bezier_track_get_key_value(p_track, i);
Vector2 out_handle = animation->bezier_track_get_key_out_handle(p_track, i);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i) {
out_handle = moving_handle_right;
}
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
|
offset += moving_selection_offset.x;
height += moving_selection_offset.y;
|
} else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
height += -scaling_selection_offset.y + (height - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
float offset_n = animation->track_get_key_time(p_track, i_n);
float height_n = animation->bezier_track_get_key_value(p_track, i_n);
Vector2 in_handle = animation->bezier_track_get_key_in_handle(p_track, i_n);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i_n) {
in_handle = moving_handle_left;
}
if (selection.has(IntPair(p_track, i_n))) {
if
|
ast_based
|
<fim_prefix>
static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
ggml_context * ctx_init;
gguf_init_params meta_gguf_params = {
/* .no_alloc = */ true,
/* .ctx = */ &ctx_init,
};
gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
if (!ctx_gguf) {
throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
}
ggml_context_ptr ctx { ctx_init };
// check metadata
{
const gguf_context * gguf_ctx = ctx_gguf.get();
<fim_suffix>;
// get metadata as string
for (int i = 0; i < gguf_get_n_kv(gguf_ctx); i++) {
gguf_type type = gguf_get_kv_type(gguf_ctx, i);
const std::string type_name =
type == GGUF_TYPE_ARRAY
? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(gguf_ctx, i)), gguf_get_arr_n(gguf_ctx, i))
: gguf_type_name(type);
const char * name = gguf_get_key(gguf_ctx, i);
const std::string value = gguf_kv_to_str(gguf_ctx, i);
if (type != GGUF_TYPE_ARRAY) {
adapter.gguf_kv.emplace(name, value);
}
const size_t MAX_VALUE_LEN = 40;<fim_middle>LLAMA_LOG_INFO("%s: Dumping metadata keys/values.\n", __func__)<fim_end>
|
static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
ggml_context * ctx_init;
gguf_init_params meta_gguf_params = {
/* .no_alloc = */ true,
/* .ctx = */ &ctx_init,
};
gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
if (!ctx_gguf) {
throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
}
ggml_context_ptr ctx { ctx_init };
// check metadata
{
const gguf_context * gguf_ctx = ctx_gguf.get();
|
LLAMA_LOG_INFO("%s: Dumping metadata keys/values.\n", __func__)
|
;
// get metadata as string
for (int i = 0; i < gguf_get_n_kv(gguf_ctx); i++) {
gguf_type type = gguf_get_kv_type(gguf_ctx, i);
const std::string type_name =
type == GGUF_TYPE_ARRAY
? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(gguf_ctx, i)), gguf_get_arr_n(gguf_ctx, i))
: gguf_type_name(type);
const char * name = gguf_get_key(gguf_ctx, i);
const std::string value = gguf_kv_to_str(gguf_ctx, i);
if (type != GGUF_TYPE_ARRAY) {
adapter.gguf_kv.emplace(name, value);
}
const size_t MAX_VALUE_LEN = 40;
|
ast_based
|
<fim_prefix> Input::get_singleton()->parse_input_event(evd);
Ref<InputEventMouseButton> evdd = evd->duplicate();
evdd->set_pressed(false);
evdd->set_button_mask(event_buttons_mask);
Input::get_singleton()->parse_input_event(evdd);
}
void AndroidInputHandler::process_magnify(Point2 p_pos, float p_factor) {
Ref<InputEventMagnifyGesture> magnify_event;
magnify_event.instantiate();
_set_key_modifier_state(magnify_event, Key::NONE);
magnify_event->set_position(p_pos);
magnify_event->set_factor(p_factor);
Input::get_singleton()->parse_input_event(magnify_event);
}
void AndroidInputHandler::process_pan(Point2 p_pos, Vector2 p_delta) {
Ref<InputEventPanGesture> pan_event;
pan_event.instantiate();<fim_suffix> pan_event->set_delta(p_delta);
Input::get_singleton()->parse_input_event(pan_event);
}
MouseButton AndroidInputHandler::_button_index_from_mask(BitField<MouseButtonMask> button_mask) {
switch (button_mask) {
case MouseButtonMask::LEFT:
return MouseButton::LEFT;
case MouseButtonMask::RIGHT:
return MouseButton::RIGHT;
case MouseButtonMask::MIDDLE:
return MouseButton::MIDDLE;
case MouseButtonMask::MB_XBUTTON1:
return MouseButton::MB_XBUTTON1;
case MouseButtonMask::MB_XBUTTON2:
return MouseButton::MB_XBUTTON2;
default:
return MouseButton::NONE;
}
}
<fim_middle> _set_key_modifier_state(pan_event, Key::NONE);
pan_event->set_position(p_pos);<fim_end>
|
Input::get_singleton()->parse_input_event(evd);
Ref<InputEventMouseButton> evdd = evd->duplicate();
evdd->set_pressed(false);
evdd->set_button_mask(event_buttons_mask);
Input::get_singleton()->parse_input_event(evdd);
}
void AndroidInputHandler::process_magnify(Point2 p_pos, float p_factor) {
Ref<InputEventMagnifyGesture> magnify_event;
magnify_event.instantiate();
_set_key_modifier_state(magnify_event, Key::NONE);
magnify_event->set_position(p_pos);
magnify_event->set_factor(p_factor);
Input::get_singleton()->parse_input_event(magnify_event);
}
void AndroidInputHandler::process_pan(Point2 p_pos, Vector2 p_delta) {
Ref<InputEventPanGesture> pan_event;
pan_event.instantiate();
|
_set_key_modifier_state(pan_event, Key::NONE);
pan_event->set_position(p_pos);
|
pan_event->set_delta(p_delta);
Input::get_singleton()->parse_input_event(pan_event);
}
MouseButton AndroidInputHandler::_button_index_from_mask(BitField<MouseButtonMask> button_mask) {
switch (button_mask) {
case MouseButtonMask::LEFT:
return MouseButton::LEFT;
case MouseButtonMask::RIGHT:
return MouseButton::RIGHT;
case MouseButtonMask::MIDDLE:
return MouseButton::MIDDLE;
case MouseButtonMask::MB_XBUTTON1:
return MouseButton::MB_XBUTTON1;
case MouseButtonMask::MB_XBUTTON2:
return MouseButton::MB_XBUTTON2;
default:
return MouseButton::NONE;
}
}
|
random
|
<fim_prefix>language;
last_oem_requested_ = oem;
#ifndef DISABLED_LEGACY_ENGINE
// For same language and datapath, just reset the adaptive classifier.
if (reset_classifier) {
tesseract_->ResetAdaptiveClassifier();
}
#endif // ndef DISABLED_LEGACY_ENGINE
return 0;
}
/**
* Returns the languages string used in the last valid initialization.
* If the last initialization specified "deu+hin" then that will be
* returned. If hin loaded eng automatically as well, then that will
* not be included in this list. To find the languages actually
* loaded use GetLoadedLanguagesAsVector.
* The returned string should NOT be deleted.
*/
const char *TessBaseAPI::GetInitLanguagesAsString() const {
<fim_suffix>
}
/**
* Returns the loaded languages in the vector of std::string.
* Includes all languages loaded by the last Init, including those loaded
* as dependencies of other loaded languages.
*/
void TessBaseAPI::GetLoadedLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
langs->push_back(tesseract_->lang);
int num_subs = tesseract_->num_sub_langs();
for (int i = 0; i < num_subs; ++i) {
langs->push_back(tesseract_->get_sub_lang(i)->lang);
}
}
}
/**
* Returns the available languages in the sorted vector of std::string.
*/
void TessBaseAPI::GetAvailableLanguagesAsVector(std::vector<std::string> *langs) const {
<fim_middle>return language_.c_str();<fim_end>
|
language;
last_oem_requested_ = oem;
#ifndef DISABLED_LEGACY_ENGINE
// For same language and datapath, just reset the adaptive classifier.
if (reset_classifier) {
tesseract_->ResetAdaptiveClassifier();
}
#endif // ndef DISABLED_LEGACY_ENGINE
return 0;
}
/**
* Returns the languages string used in the last valid initialization.
* If the last initialization specified "deu+hin" then that will be
* returned. If hin loaded eng automatically as well, then that will
* not be included in this list. To find the languages actually
* loaded use GetLoadedLanguagesAsVector.
* The returned string should NOT be deleted.
*/
const char *TessBaseAPI::GetInitLanguagesAsString() const {
|
return language_.c_str();
|
}
/**
* Returns the loaded languages in the vector of std::string.
* Includes all languages loaded by the last Init, including those loaded
* as dependencies of other loaded languages.
*/
void TessBaseAPI::GetLoadedLanguagesAsVector(std::vector<std::string> *langs) const {
langs->clear();
if (tesseract_ != nullptr) {
langs->push_back(tesseract_->lang);
int num_subs = tesseract_->num_sub_langs();
for (int i = 0; i < num_subs; ++i) {
langs->push_back(tesseract_->get_sub_lang(i)->lang);
}
}
}
/**
* Returns the available languages in the sorted vector of std::string.
*/
void TessBaseAPI::GetAvailableLanguagesAsVector(std::vector<std::string> *langs) const {
|
ast_based
|
<fim_prefix>}
void AnimationBezierTrackEdit::_play_position_draw() {
if (animation.is_null() || play_position_pos < 0) {
return;
}
float scale = timeline->get_zoom_scale();
int h = get_size().height;
int limit = timeline->get_name_limit();
int px = (-timeline->get_value() + play_position_pos) * scale + limit;
if (px >= limit && px < (get_size().width)) {
const Color color = get_theme_color(SNAME("accent_color"), EditorStringName(Editor));
play_position->draw_line(Point2(px, 0), Point2(px, h), color, Math::round(2 * EDSCALE));
}
}
void AnimationBezierTrackEdit::set_play_position(real_t p_pos) {
play_position_pos = p_pos;
play_position->queue_redraw();<fim_suffix>}
void AnimationBezierTrackEdit::set_root(Node *p_root) {
root = p_root;
}
void AnimationBezierTrackEdit::set_filtered(bool p_filtered) {
is_filtered = p_filtered;
if (animation.is_null()) {
return;
}
String base_path = String(animation->track_get_path(selected_track));
if (is_filtered) {
if (root && root->has_node(base_path)) {
Node *node = root->get_node(base_path);
if (!node || !EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (animation->track_get_type(i) != Animation::TrackType::TYPE_BEZIER) {
continue;
}
base_path = String(animation->track_get_path(i));<fim_middle>}
void AnimationBezierTrackEdit::update_play_position() {
play_position->queue_redraw();<fim_end>
|
}
void AnimationBezierTrackEdit::_play_position_draw() {
if (animation.is_null() || play_position_pos < 0) {
return;
}
float scale = timeline->get_zoom_scale();
int h = get_size().height;
int limit = timeline->get_name_limit();
int px = (-timeline->get_value() + play_position_pos) * scale + limit;
if (px >= limit && px < (get_size().width)) {
const Color color = get_theme_color(SNAME("accent_color"), EditorStringName(Editor));
play_position->draw_line(Point2(px, 0), Point2(px, h), color, Math::round(2 * EDSCALE));
}
}
void AnimationBezierTrackEdit::set_play_position(real_t p_pos) {
play_position_pos = p_pos;
play_position->queue_redraw();
|
}
void AnimationBezierTrackEdit::update_play_position() {
play_position->queue_redraw();
|
}
void AnimationBezierTrackEdit::set_root(Node *p_root) {
root = p_root;
}
void AnimationBezierTrackEdit::set_filtered(bool p_filtered) {
is_filtered = p_filtered;
if (animation.is_null()) {
return;
}
String base_path = String(animation->track_get_path(selected_track));
if (is_filtered) {
if (root && root->has_node(base_path)) {
Node *node = root->get_node(base_path);
if (!node || !EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (animation->track_get_type(i) != Animation::TrackType::TYPE_BEZIER) {
continue;
}
base_path = String(animation->track_get_path(i));
|
random
|
<fim_prefix> if (tesseract_ == nullptr || (!recognition_done_ && Recognize(nullptr) < 0)) {
return nullptr;
}
std::string text("");
const std::unique_ptr</*non-const*/ ResultIterator> it(GetIterator());
do {
if (it->Empty(RIL_PARA)) {
continue;
}
auto block_type = it->BlockType();
switch (block_type) {
case PT_FLOWING_IMAGE:
case PT_HEADING_IMAGE:
case PT_PULLOUT_IMAGE:
case PT_HORZ_LINE:
case PT_VERT_LINE:
// Ignore images and lines for text output.
continue;
case PT_NOISE:
tprintf("TODO: Please report image which triggers the noise case.\n");
ASSERT_HOST(false);
default:
break;
}
<fim_suffix>
static void AddBoxToTSV(const PageIterator *it, PageIteratorLevel level, std::string &text) {
int left, top, right, bottom;
it->BoundingBox(level, &left, &top, &right, &bottom);
text += "\t" + std::to_string(left);
text += "\t" + std::to_string(top);
text += "\t" + std::to_string(right - left);
text += "\t" + std::to_string(bottom - top);
}
/**
* Make a TSV-formatted string from the internal data structures.
* page_number is 0-based but will appear in the output as 1-based.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetTSVText(int page_number) {
if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(nullptr) < 0)) {<fim_middle> const std::unique_ptr<const char[]> para_text(it->GetUTF8Text(RIL_PARA));
text += para_text.get();
} while (it->Next(RIL_PARA));
return copy_string(text);
}<fim_end>
|
if (tesseract_ == nullptr || (!recognition_done_ && Recognize(nullptr) < 0)) {
return nullptr;
}
std::string text("");
const std::unique_ptr</*non-const*/ ResultIterator> it(GetIterator());
do {
if (it->Empty(RIL_PARA)) {
continue;
}
auto block_type = it->BlockType();
switch (block_type) {
case PT_FLOWING_IMAGE:
case PT_HEADING_IMAGE:
case PT_PULLOUT_IMAGE:
case PT_HORZ_LINE:
case PT_VERT_LINE:
// Ignore images and lines for text output.
continue;
case PT_NOISE:
tprintf("TODO: Please report image which triggers the noise case.\n");
ASSERT_HOST(false);
default:
break;
}
|
const std::unique_ptr<const char[]> para_text(it->GetUTF8Text(RIL_PARA));
text += para_text.get();
} while (it->Next(RIL_PARA));
return copy_string(text);
}
|
static void AddBoxToTSV(const PageIterator *it, PageIteratorLevel level, std::string &text) {
int left, top, right, bottom;
it->BoundingBox(level, &left, &top, &right, &bottom);
text += "\t" + std::to_string(left);
text += "\t" + std::to_string(top);
text += "\t" + std::to_string(right - left);
text += "\t" + std::to_string(bottom - top);
}
/**
* Make a TSV-formatted string from the internal data structures.
* page_number is 0-based but will appear in the output as 1-based.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetTSVText(int page_number) {
if (tesseract_ == nullptr || (page_res_ == nullptr && Recognize(nullptr) < 0)) {
|
random
|
<fim_prefix>JoypadEvent p_event) {
switch (p_event.type) {
case JOY_EVENT_BUTTON:
Input::get_singleton()->joy_button(p_event.device, (JoyButton)p_event.index, p_event.pressed);
break;
case JOY_EVENT_AXIS:
Input::get_singleton()->joy_axis(p_event.device, (JoyAxis)p_event.index, p_event.value);
break;
case JOY_EVENT_HAT:
Input::get_singleton()->joy_hat(p_event.device, p_event.hat);
break;
default:
return;
}
}
void AndroidInputHandler::_set_key_modifier_state(Ref<InputEventWithModifiers> ev, Key p_keycode) {
if (p_keycode != Key::SHIFT) {
ev->set_shift_pressed(shift_mem);
}
if (p_keycode != Key::ALT) {
ev->set_alt_pressed(alt_mem);
}
if (p_keycode != Key::META) {
<fim_suffix>;
}
if (p_keycode != Key::CTRL) {
ev->set_ctrl_pressed(control_mem);
}
}
void AndroidInputHandler::process_key_event(int p_physical_keycode, int p_unicode, int p_key_label, bool p_pressed, bool p_echo) {
static char32_t prev_wc = 0;
char32_t unicode = p_unicode;
if ((p_unicode & 0xfffffc00) == 0xd800) {
if (prev_wc != 0) {
ERR_PRINT("invalid utf16 surrogate input");
}
prev_wc = unicode;
return; // Skip surrogate.
} else if ((unicode & 0xfffffc00) == 0xdc00) {
if (prev_wc == 0) {
ERR_PRINT("invalid utf16 surrogate input");
return; // Skip invalid surrogate.
}
unicode = (prev_wc << 10UL) + unicode - ((0xd800 << 10UL) + 0xdc00 - 0x10000);
prev_wc = 0;
} else <fim_middle>ev->set_meta_pressed(meta_mem)<fim_end>
|
JoypadEvent p_event) {
switch (p_event.type) {
case JOY_EVENT_BUTTON:
Input::get_singleton()->joy_button(p_event.device, (JoyButton)p_event.index, p_event.pressed);
break;
case JOY_EVENT_AXIS:
Input::get_singleton()->joy_axis(p_event.device, (JoyAxis)p_event.index, p_event.value);
break;
case JOY_EVENT_HAT:
Input::get_singleton()->joy_hat(p_event.device, p_event.hat);
break;
default:
return;
}
}
void AndroidInputHandler::_set_key_modifier_state(Ref<InputEventWithModifiers> ev, Key p_keycode) {
if (p_keycode != Key::SHIFT) {
ev->set_shift_pressed(shift_mem);
}
if (p_keycode != Key::ALT) {
ev->set_alt_pressed(alt_mem);
}
if (p_keycode != Key::META) {
|
ev->set_meta_pressed(meta_mem)
|
;
}
if (p_keycode != Key::CTRL) {
ev->set_ctrl_pressed(control_mem);
}
}
void AndroidInputHandler::process_key_event(int p_physical_keycode, int p_unicode, int p_key_label, bool p_pressed, bool p_echo) {
static char32_t prev_wc = 0;
char32_t unicode = p_unicode;
if ((p_unicode & 0xfffffc00) == 0xd800) {
if (prev_wc != 0) {
ERR_PRINT("invalid utf16 surrogate input");
}
prev_wc = unicode;
return; // Skip surrogate.
} else if ((unicode & 0xfffffc00) == 0xdc00) {
if (prev_wc == 0) {
ERR_PRINT("invalid utf16 surrogate input");
return; // Skip invalid surrogate.
}
unicode = (prev_wc << 10UL) + unicode - ((0xd800 << 10UL) + 0xdc00 - 0x10000);
prev_wc = 0;
} else
|
ast_based
|
<fim_prefix>
#include "tensorflow/compiler/aot/aot_only_var_handle_op.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
namespace {
// Implementation of varhandle that binds a VarHandleOp to an XlaResource of the
// same name. It is not safe to use this op in a JIT context.
class XlaAotOnlyVarHandleOp : public XlaOpKernel {
public:
explicit XlaAotOnlyVarHandleOp(OpKernelConstruction* c);
void Compile(XlaOpKernelContext* context) override;
private:
string name_;
};<fim_suffix> : XlaOpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("shared_name", &name_));
}
void XlaAotOnlyVarHandleOp::Compile(XlaOpKernelContext* context) {
// Look for a resource of the same name. TF also keys that on the container
// and type attributes, but that doesn't seem necessary.
for (const auto& resource : context->xla_context()->resources()) {
if (resource->kind() == XlaResource::kVariable &&
resource->name() == name_) {
context->SetResourceOutput(0, resource.get());
return;
}
}
context->SetStatus(
errors::InvalidArgument("Variable: ", name_, " not configured"));
}
} // namespace
REGISTER_OP(tfcompile::kXlaAotOnlyVarHandleOp)
.Doc(R"doc(<fim_middle>
XlaAotOnlyVarHandleOp::XlaAotOnlyVarHandleOp(OpKernelConstruction* c)<fim_end>
|
#include "tensorflow/compiler/aot/aot_only_var_handle_op.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
namespace {
// Implementation of varhandle that binds a VarHandleOp to an XlaResource of the
// same name. It is not safe to use this op in a JIT context.
class XlaAotOnlyVarHandleOp : public XlaOpKernel {
public:
explicit XlaAotOnlyVarHandleOp(OpKernelConstruction* c);
void Compile(XlaOpKernelContext* context) override;
private:
string name_;
};
|
XlaAotOnlyVarHandleOp::XlaAotOnlyVarHandleOp(OpKernelConstruction* c)
|
: XlaOpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("shared_name", &name_));
}
void XlaAotOnlyVarHandleOp::Compile(XlaOpKernelContext* context) {
// Look for a resource of the same name. TF also keys that on the container
// and type attributes, but that doesn't seem necessary.
for (const auto& resource : context->xla_context()->resources()) {
if (resource->kind() == XlaResource::kVariable &&
resource->name() == name_) {
context->SetResourceOutput(0, resource.get());
return;
}
}
context->SetStatus(
errors::InvalidArgument("Variable: ", name_, " not configured"));
}
} // namespace
REGISTER_OP(tfcompile::kXlaAotOnlyVarHandleOp)
.Doc(R"doc(
|
random
|
<fim_prefix> calling IteratorGetNextOp on a tf.data iterator.
static constexpr char kIteratorElementLayouts[] = "tf._element_layouts";
// Attribute used in tf.data ops which stores the shapes of the output elements.
static constexpr char kIteratorOutputShapes[] = "output_shapes";
// The number of lists of regular tensors used to represent sparse tensors.
static constexpr int kSparseTensorNum = 3;
// Attribute which stores the environment variable value for all_reduce
// optimization group size: DTENSOR_ALLREDUCE_COMBINE_OPTIMIZATION_GROUP_SIZE.
// This represents the maximum number of AllReduce ops to merge into one op. It
// is a determining factor used during dtensor_allreduce_combine_optimization.
<fim_suffix>
// Attribute which stores the environment variable value for whether
// multi-device expansion is enabled: DTENSOR_ENABLE_MULTI_DEVICE_EXPANSION.
static constexpr char kEnableMultiDeviceMode[] =
"dtensor.enable_multi_device_mode";
// Attribute which stores the environment variable value for all_reduce
// optimization group size: DTENSOR_ALLREDUCE_COMBINE_OPTIMIZATION_GROUP_SIZE.
// This represents the maximum distance between two AllReduce on the compute
// graph in terms of topological level. It is a determining factor used during
// dtensor_allreduce_combine_optimization.
static constexpr char kAllReduceTopologicalDistance[] =
"dtensor.all_reduce_combiner.topological_distance";
<fim_middle>static constexpr char kAllReduceNumOpsInGroup[] =
"dtensor.all_reduce_combiner.num_ops_in_group";<fim_end>
|
calling IteratorGetNextOp on a tf.data iterator.
static constexpr char kIteratorElementLayouts[] = "tf._element_layouts";
// Attribute used in tf.data ops which stores the shapes of the output elements.
static constexpr char kIteratorOutputShapes[] = "output_shapes";
// The number of lists of regular tensors used to represent sparse tensors.
static constexpr int kSparseTensorNum = 3;
// Attribute which stores the environment variable value for all_reduce
// optimization group size: DTENSOR_ALLREDUCE_COMBINE_OPTIMIZATION_GROUP_SIZE.
// This represents the maximum number of AllReduce ops to merge into one op. It
// is a determining factor used during dtensor_allreduce_combine_optimization.
|
static constexpr char kAllReduceNumOpsInGroup[] =
"dtensor.all_reduce_combiner.num_ops_in_group";
|
// Attribute which stores the environment variable value for whether
// multi-device expansion is enabled: DTENSOR_ENABLE_MULTI_DEVICE_EXPANSION.
static constexpr char kEnableMultiDeviceMode[] =
"dtensor.enable_multi_device_mode";
// Attribute which stores the environment variable value for all_reduce
// optimization group size: DTENSOR_ALLREDUCE_COMBINE_OPTIMIZATION_GROUP_SIZE.
// This represents the maximum distance between two AllReduce on the compute
// graph in terms of topological level. It is a determining factor used during
// dtensor_allreduce_combine_optimization.
static constexpr char kAllReduceTopologicalDistance[] =
"dtensor.all_reduce_combiner.topological_distance";
|
ast_based
|
<fim_prefix> const uint8_t *src_data = r_img->ptr();
for (int i = 0; i < mip_count + 1; i++) {
int src_mip_w, src_mip_h;
const int64_t src_ofs = Image::get_image_mipmap_offset_and_dimensions(width, height, r_img->get_format(), i, src_mip_w, src_mip_h);
const uint8_t *mip_data = &src_data[src_ofs];
const int64_t dst_ofs = Image::get_image_mipmap_offset(width, height, target_format, i);
uint8_t *dest_mip_write = &dest_write[dst_ofs];
// Ensure that mip offset is a multiple of 8 (etcpak expects uint64_t pointer).
if (unlikely(dst_ofs % 8 != 0)) {
astcenc_context_free(context);
ERR_FAIL_MSG("astcenc: Mip offset is not a multiple of 8.");
}
// Compress image.<fim_suffix> image.dim_x = src_mip_w;
image.dim_y = src_mip_h;
image.dim_z = 1;
if (r_img->get_format() == Image::FORMAT_RGBA8) {
image.data_type = ASTCENC_TYPE_U8;
} else if (r_img->get_format() == Image::FORMAT_RGBAH) {
image.data_type = ASTCENC_TYPE_F16;
} else {
image.data_type = ASTCENC_TYPE_F32;
}
image.data = (void **)(&mip_data);
// Compute the number of ASTC blocks in each dimension.
unsigned int block_count_x = (src_mip_w + block_x - 1) / block_x;
unsigned int block_count_y = (src_mip_h + block_y - 1) / block_y;
size_t comp_len = block_count_x * block_count_y * 16;
const astcenc_swizzle swizzle = {<fim_middle> astcenc_image image;<fim_end>
|
const uint8_t *src_data = r_img->ptr();
for (int i = 0; i < mip_count + 1; i++) {
int src_mip_w, src_mip_h;
const int64_t src_ofs = Image::get_image_mipmap_offset_and_dimensions(width, height, r_img->get_format(), i, src_mip_w, src_mip_h);
const uint8_t *mip_data = &src_data[src_ofs];
const int64_t dst_ofs = Image::get_image_mipmap_offset(width, height, target_format, i);
uint8_t *dest_mip_write = &dest_write[dst_ofs];
// Ensure that mip offset is a multiple of 8 (etcpak expects uint64_t pointer).
if (unlikely(dst_ofs % 8 != 0)) {
astcenc_context_free(context);
ERR_FAIL_MSG("astcenc: Mip offset is not a multiple of 8.");
}
// Compress image.
|
astcenc_image image;
|
image.dim_x = src_mip_w;
image.dim_y = src_mip_h;
image.dim_z = 1;
if (r_img->get_format() == Image::FORMAT_RGBA8) {
image.data_type = ASTCENC_TYPE_U8;
} else if (r_img->get_format() == Image::FORMAT_RGBAH) {
image.data_type = ASTCENC_TYPE_F16;
} else {
image.data_type = ASTCENC_TYPE_F32;
}
image.data = (void **)(&mip_data);
// Compute the number of ASTC blocks in each dimension.
unsigned int block_count_x = (src_mip_w + block_x - 1) / block_x;
unsigned int block_count_y = (src_mip_h + block_y - 1) / block_y;
size_t comp_len = block_count_x * block_count_y * 16;
const astcenc_swizzle swizzle = {
|
random
|
<fim_prefix>ide the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_classname.is_empty()) {
accesskit_node_set_class_name(ae->node, p_classname.utf8().ptr());
} else {
accesskit_node_clear_class_name(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_placeholder(const RID &p_id, const String &p_placeholder) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if <fim_suffix> else {
accesskit_node_clear_placeholder(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_language(const RID &p_id, const String &p_language) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_language(ae->node, p_language.utf8().ptr());
}
void AccessibilityDriverAccessKit::accessibility_update_set_text_orientation(const RID &p_id, bool p_vertical) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside th<fim_middle>(!p_placeholder.is_empty()) {
accesskit_node_set_placeholder(ae->node, p_placeholder.utf8().ptr());
}<fim_end>
|
ide the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_classname.is_empty()) {
accesskit_node_set_class_name(ae->node, p_classname.utf8().ptr());
} else {
accesskit_node_clear_class_name(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_placeholder(const RID &p_id, const String &p_placeholder) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if
|
(!p_placeholder.is_empty()) {
accesskit_node_set_placeholder(ae->node, p_placeholder.utf8().ptr());
}
|
else {
accesskit_node_clear_placeholder(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_language(const RID &p_id, const String &p_language) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_language(ae->node, p_language.utf8().ptr());
}
void AccessibilityDriverAccessKit::accessibility_update_set_text_orientation(const RID &p_id, bool p_vertical) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside th
|
ast_based
|
<fim_prefix>*ad = static_cast<AudioDriverDummy *>(p_udata);
uint64_t usdelay = (ad->buffer_frames / float(ad->mix_rate)) * 1000000;
while (!ad->exit_thread.is_set()) {
if (ad->active.is_set()) {
ad->lock();
ad->start_counting_ticks();
ad->audio_server_process(ad->buffer_frames, ad->samples_in);
ad->stop_counting_ticks();
ad->unlock();
}
OS::get_singleton()->delay_usec(usdelay);
}
}
void AudioDriverDummy::start() {
active.set();
}
int AudioDriverDummy::get_mix_rate() const {
return mix_rate;
}
AudioDriver::SpeakerMode AudioDriverDummy::get_speaker_mode() const {
return speaker_mode;
}
void AudioDriverDummy::lock() {
mutex.lock();
}
void AudioDriverDummy::unlock() {
<fim_suffix>;
}
void AudioDriverDummy::set_use_threads(bool p_use_threads) {
use_threads = p_use_threads;
}
void AudioDriverDummy::set_speaker_mode(SpeakerMode p_mode) {
speaker_mode = p_mode;
}
void AudioDriverDummy::set_mix_rate(int p_rate) {
mix_rate = p_rate;
}
uint32_t AudioDriverDummy::get_channels() const {
static const int channels_for_mode[4] = { 2, 4, 8, 16 };
return channels_for_mode[speaker_mode];
}
void AudioDriverDummy::mix_audio(int p_frames, int32_t *p_buffer) {
ERR_FAIL_COND(!active.is_set()); // If not active, should not mix.
ERR_FAIL_COND(use_threads == true); // If using threads, this will not work well.
uint32_t todo = p_frames;
while (todo) {
uint32_t to_mix = MIN(<fim_middle>mutex.unlock()<fim_end>
|
*ad = static_cast<AudioDriverDummy *>(p_udata);
uint64_t usdelay = (ad->buffer_frames / float(ad->mix_rate)) * 1000000;
while (!ad->exit_thread.is_set()) {
if (ad->active.is_set()) {
ad->lock();
ad->start_counting_ticks();
ad->audio_server_process(ad->buffer_frames, ad->samples_in);
ad->stop_counting_ticks();
ad->unlock();
}
OS::get_singleton()->delay_usec(usdelay);
}
}
void AudioDriverDummy::start() {
active.set();
}
int AudioDriverDummy::get_mix_rate() const {
return mix_rate;
}
AudioDriver::SpeakerMode AudioDriverDummy::get_speaker_mode() const {
return speaker_mode;
}
void AudioDriverDummy::lock() {
mutex.lock();
}
void AudioDriverDummy::unlock() {
|
mutex.unlock()
|
;
}
void AudioDriverDummy::set_use_threads(bool p_use_threads) {
use_threads = p_use_threads;
}
void AudioDriverDummy::set_speaker_mode(SpeakerMode p_mode) {
speaker_mode = p_mode;
}
void AudioDriverDummy::set_mix_rate(int p_rate) {
mix_rate = p_rate;
}
uint32_t AudioDriverDummy::get_channels() const {
static const int channels_for_mode[4] = { 2, 4, 8, 16 };
return channels_for_mode[speaker_mode];
}
void AudioDriverDummy::mix_audio(int p_frames, int32_t *p_buffer) {
ERR_FAIL_COND(!active.is_set()); // If not active, should not mix.
ERR_FAIL_COND(use_threads == true); // If using threads, this will not work well.
uint32_t todo = p_frames;
while (todo) {
uint32_t to_mix = MIN(
|
ast_based
|
<fim_prefix>ter(const char *utf8_character) const {
return tesseract_->unicharset.contains_unichar(utf8_character);
}
// TODO(rays) Obsolete this function and replace with a more aptly named
// function that returns image coordinates rather than tesseract coordinates.
bool TessBaseAPI::GetTextDirection(int *out_offset, float *out_slope) {
const std::unique_ptr<const PageIterator> it(AnalyseLayout());
if (it == nullptr) {
return false;
}
int x1, x2, y1, y2;
it->Baseline(RIL_TEXTLINE, &x1, &y1, &x2, &y2);
// Calculate offset and slope (NOTE: Kind of ugly)
if (x2 <= x1) {
x2 = x1 + 1;
}
// Convert the point pair to slope/offset of the baseline (in image coords.)
*out_slope = <fim_suffix> / (x2 - x1);
*out_offset = static_cast<int>(y1 - *out_slope * x1);
// Get the y-coord of the baseline at the left and right edges of the
// textline's bounding box.
int left, top, right, bottom;
if (!it->BoundingBox(RIL_TEXTLINE, &left, &top, &right, &bottom)) {
return false;
}
int left_y = IntCastRounded(*out_slope * left + *out_offset);
int right_y = IntCastRounded(*out_slope * right + *out_offset);
// Shift the baseline down so it passes through the nearest bottom-corner
// of the textline's bounding box. This is the difference between the y
// at the lowest (max) edge of the box and the actual box bottom.
*out_offset += bottom - std::max(left_y, right_y);
/<fim_middle>static_cast<float>(y2 - y1)<fim_end>
|
ter(const char *utf8_character) const {
return tesseract_->unicharset.contains_unichar(utf8_character);
}
// TODO(rays) Obsolete this function and replace with a more aptly named
// function that returns image coordinates rather than tesseract coordinates.
bool TessBaseAPI::GetTextDirection(int *out_offset, float *out_slope) {
const std::unique_ptr<const PageIterator> it(AnalyseLayout());
if (it == nullptr) {
return false;
}
int x1, x2, y1, y2;
it->Baseline(RIL_TEXTLINE, &x1, &y1, &x2, &y2);
// Calculate offset and slope (NOTE: Kind of ugly)
if (x2 <= x1) {
x2 = x1 + 1;
}
// Convert the point pair to slope/offset of the baseline (in image coords.)
*out_slope =
|
static_cast<float>(y2 - y1)
|
/ (x2 - x1);
*out_offset = static_cast<int>(y1 - *out_slope * x1);
// Get the y-coord of the baseline at the left and right edges of the
// textline's bounding box.
int left, top, right, bottom;
if (!it->BoundingBox(RIL_TEXTLINE, &left, &top, &right, &bottom)) {
return false;
}
int left_y = IntCastRounded(*out_slope * left + *out_offset);
int right_y = IntCastRounded(*out_slope * right + *out_offset);
// Shift the baseline down so it passes through the nearest bottom-corner
// of the textline's bounding box. This is the difference between the y
// at the lowest (max) edge of the box and the actual box bottom.
*out_offset += bottom - std::max(left_y, right_y);
/
|
ast_based
|
<fim_prefix> = nullptr;
int64_t gl_count = 0;
int64_t gl_index = 0;
float run_off_x = 0.0;
Vector2i full_range;
if (p_shaped_text.is_valid()) {
text_width = TS->shaped_text_get_size(p_shaped_text).x;
text_height = MAX(text_height, TS->shaped_text_get_size(p_shaped_text).y);
words = TS->shaped_text_get_word_breaks(p_shaped_text);
run_count = TS->shaped_get_run_count(p_shaped_text);
gl = TS->shaped_text_get_glyphs(p_shaped_text);
gl_count = TS->shaped_text_get_glyph_count(p_shaped_text);
full_range = TS->shaped_text_get_range(p_shaped_text);
}
accesskit_rect root_rect;
root_rect.x0 = 0;
root_rect.y0 = 0;
root_rect.x1 = text_width;
root_rect.y1 = MAX(p_min_height, text_height);
<fim_suffix>;
// Create text element for each run.
Vector<AccessibilityElement *> text_elements;
for (int64_t i = 0; i < run_count; i++) {
const Vector2i range = TS->shaped_get_run_range(p_shaped_text, i);
String t = TS->shaped_get_run_text(p_shaped_text, i);
if (t.is_empty()) {
continue;
}
AccessibilityElement *ae = memnew(AccessibilityElement);
ae->role = ACCESSKIT_ROLE_TEXT_RUN;
ae->window_id = parent_ae->window_id;
ae->parent = root_rid;
ae->run = Vector3i(range.x, range.y, i);
ae->node = accesskit_node_new(ae->role);
text_elements.push_back(ae);
// UTF-8 text and char lengths.
Vector<uint8_t> char_lengths;
CharString text = t.utf8(&char_lengths);
accesskit_<fim_middle>accesskit_node_set_bounds(root_ae->node, root_rect)<fim_end>
|
= nullptr;
int64_t gl_count = 0;
int64_t gl_index = 0;
float run_off_x = 0.0;
Vector2i full_range;
if (p_shaped_text.is_valid()) {
text_width = TS->shaped_text_get_size(p_shaped_text).x;
text_height = MAX(text_height, TS->shaped_text_get_size(p_shaped_text).y);
words = TS->shaped_text_get_word_breaks(p_shaped_text);
run_count = TS->shaped_get_run_count(p_shaped_text);
gl = TS->shaped_text_get_glyphs(p_shaped_text);
gl_count = TS->shaped_text_get_glyph_count(p_shaped_text);
full_range = TS->shaped_text_get_range(p_shaped_text);
}
accesskit_rect root_rect;
root_rect.x0 = 0;
root_rect.y0 = 0;
root_rect.x1 = text_width;
root_rect.y1 = MAX(p_min_height, text_height);
|
accesskit_node_set_bounds(root_ae->node, root_rect)
|
;
// Create text element for each run.
Vector<AccessibilityElement *> text_elements;
for (int64_t i = 0; i < run_count; i++) {
const Vector2i range = TS->shaped_get_run_range(p_shaped_text, i);
String t = TS->shaped_get_run_text(p_shaped_text, i);
if (t.is_empty()) {
continue;
}
AccessibilityElement *ae = memnew(AccessibilityElement);
ae->role = ACCESSKIT_ROLE_TEXT_RUN;
ae->window_id = parent_ae->window_id;
ae->parent = root_rid;
ae->run = Vector3i(range.x, range.y, i);
ae->node = accesskit_node_new(ae->role);
text_elements.push_back(ae);
// UTF-8 text and char lengths.
Vector<uint8_t> char_lengths;
CharString text = t.utf8(&char_lengths);
accesskit_
|
ast_based
|
<fim_prefix> res_it->Next(RIL_SYMBOL);
} while (!res_it->Empty(RIL_BLOCK) && !res_it->IsAtBeginningOf(RIL_WORD));
tsv_str += "\n"; // end of row
#if !defined(NDEBUG)
wcnt++;
#endif
}
return copy_string(tsv_str);
}
/** The 5 numbers output for each box (the usual 4 and a page number.) */
const int kNumbersPerBlob = 5;
/**
* The number of bytes taken by each number. Since we use int16_t for ICOORD,
* assume only 5 digits max.
*/
const int kBytesPerNumber = 5;
/**
* Multiplier for max expected textlength assumes (kBytesPerNumber + space)
* * kNumbersPerBlob plus the newline. Add to this the
* original UTF8 characters, and one kMaxBytesPerLine for safety.<fim_suffix>const int kBytesPer64BitNumber = 20;
/**
* A maximal single box could occupy kNumbersPerBlob numbers at
* kBytesPer64BitNumber digits (if someone sneaks in a 64 bit value) and a
* space plus the newline and the maximum length of a UNICHAR.
* Test against this on each iteration for safety.
*/
const int kMaxBytesPerLine = kNumbersPerBlob * (kBytesPer64BitNumber + 1) + 1 + UNICHAR_LEN;
/**
* The recognized text is returned as a char* which is coded
* as a UTF8 box file.
 * page_number is a 0-based page index that will appear in the box file.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetBoxText(int page_number) {<fim_middle> */
const int kBytesPerBoxFileLine = (kBytesPerNumber + 1) * kNumbersPerBlob + 1;
/** Max bytes in the decimal representation of int64_t. */<fim_end>
|
res_it->Next(RIL_SYMBOL);
} while (!res_it->Empty(RIL_BLOCK) && !res_it->IsAtBeginningOf(RIL_WORD));
tsv_str += "\n"; // end of row
#if !defined(NDEBUG)
wcnt++;
#endif
}
return copy_string(tsv_str);
}
/** The 5 numbers output for each box (the usual 4 and a page number.) */
const int kNumbersPerBlob = 5;
/**
* The number of bytes taken by each number. Since we use int16_t for ICOORD,
* assume only 5 digits max.
*/
const int kBytesPerNumber = 5;
/**
* Multiplier for max expected textlength assumes (kBytesPerNumber + space)
* * kNumbersPerBlob plus the newline. Add to this the
* original UTF8 characters, and one kMaxBytesPerLine for safety.
|
*/
const int kBytesPerBoxFileLine = (kBytesPerNumber + 1) * kNumbersPerBlob + 1;
/** Max bytes in the decimal representation of int64_t. */
|
const int kBytesPer64BitNumber = 20;
/**
* A maximal single box could occupy kNumbersPerBlob numbers at
* kBytesPer64BitNumber digits (if someone sneaks in a 64 bit value) and a
* space plus the newline and the maximum length of a UNICHAR.
* Test against this on each iteration for safety.
*/
const int kMaxBytesPerLine = kNumbersPerBlob * (kBytesPer64BitNumber + 1) + 1 + UNICHAR_LEN;
/**
* The recognized text is returned as a char* which is coded
* as a UTF8 box file.
* page_number is a 0-base page index that will appear in the box file.
* Returned string must be freed with the delete [] operator.
*/
char *TessBaseAPI::GetBoxText(int page_number) {
|
random
|
<fim_prefix> if (ferror(fp)) {
die_fmt("fread failed: %s", strerror(errno));
}
if (ret != 1) {
die("unexpectedly reached end of file");
}
}
std::uint32_t read_u32() {
std::uint32_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::float_t read_f32() {
std::float_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::string read_string(std::uint32_t len) {
std::vector<char> chars(len);
read_raw(chars.data(), len);
return std::string(chars.data(), len);
}
~my_llama_file() {
if (fp) {
std::fclose(fp);
}
}
};<fim_suffix> return false;
}
std::string magic = file.read_string(4);
return magic == GGUF_MAGIC;
}
static std::string llama_escape_whitespaces(const std::string & text) {
std::ostringstream out;
for (char c : text) {
if (c == ' ') out << "\xe2\x96\x81";
else out << c;
}
return out.str();
}
static void load_vocab(const char * filename, const Config * config, struct my_llama_vocab * vocab) {
if (is_ggml_file(filename)) {
LOG_INF("%s: Loading vocabulary from gguf file %s\n", __func__, filename);
struct ggml_context * ctx_data = NULL;
struct gguf_init_params params = {
/*.no_alloc = */ false,<fim_middle>
static bool is_ggml_file(const char * filename) {
my_llama_file file(filename, "rb");
if (file.size < 4) {<fim_end>
|
if (ferror(fp)) {
die_fmt("fread failed: %s", strerror(errno));
}
if (ret != 1) {
die("unexpectedly reached end of file");
}
}
std::uint32_t read_u32() {
std::uint32_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::float_t read_f32() {
std::float_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::string read_string(std::uint32_t len) {
std::vector<char> chars(len);
read_raw(chars.data(), len);
return std::string(chars.data(), len);
}
~my_llama_file() {
if (fp) {
std::fclose(fp);
}
}
};
|
static bool is_ggml_file(const char * filename) {
my_llama_file file(filename, "rb");
if (file.size < 4) {
|
return false;
}
std::string magic = file.read_string(4);
return magic == GGUF_MAGIC;
}
static std::string llama_escape_whitespaces(const std::string & text) {
std::ostringstream out;
for (char c : text) {
if (c == ' ') out << "\xe2\x96\x81";
else out << c;
}
return out.str();
}
static void load_vocab(const char * filename, const Config * config, struct my_llama_vocab * vocab) {
if (is_ggml_file(filename)) {
LOG_INF("%s: Loading vocabulary from gguf file %s\n", __func__, filename);
struct ggml_context * ctx_data = NULL;
struct gguf_init_params params = {
/*.no_alloc = */ false,
|
random
|
<fim_prefix>SHIFT: return ImGuiKey_LeftShift;
case ALLEGRO_KEY_ALT: return ImGuiKey_LeftAlt;
case ALLEGRO_KEY_LWIN: return ImGuiKey_LeftSuper;
case ALLEGRO_KEY_RCTRL: return ImGuiKey_RightCtrl;
case ALLEGRO_KEY_RSHIFT: return ImGuiKey_RightShift;
case ALLEGRO_KEY_ALTGR: return ImGuiKey_RightAlt;
case ALLEGRO_KEY_RWIN: return ImGuiKey_RightSuper;
case ALLEGRO_KEY_MENU: return ImGuiKey_Menu;
case ALLEGRO_KEY_0: return ImGuiKey_0;
case ALLEGRO_KEY_1: return ImGuiKey_1;
case ALLEGRO_KEY_2: return ImGuiKey_2;
case ALLEGRO_KEY_3: return ImGuiKey_3;
case ALLEGRO_KEY_4: return ImGuiKey_4;
case ALLEGRO_KEY_5: <fim_suffix>
case ALLEGRO_KEY_6: return ImGuiKey_6;
case ALLEGRO_KEY_7: return ImGuiKey_7;
case ALLEGRO_KEY_8: return ImGuiKey_8;
case ALLEGRO_KEY_9: return ImGuiKey_9;
case ALLEGRO_KEY_A: return ImGuiKey_A;
case ALLEGRO_KEY_B: return ImGuiKey_B;
case ALLEGRO_KEY_C: return ImGuiKey_C;
case ALLEGRO_KEY_D: return ImGuiKey_D;
case ALLEGRO_KEY_E: return ImGuiKey_E;
case ALLEGRO_KEY_F: return ImGuiKey_F;
case ALLEGRO_KEY_G: return ImGuiKey_G;
case ALLEGRO_KEY_H: return ImGuiKey_H;
case ALLEGRO_KEY_I: return ImGuiKey_I;
case ALLEGRO_KEY_J: return ImGuiKey_J;
case ALLEGRO_KEY_K: return ImGuiK<fim_middle>return ImGuiKey_5;<fim_end>
|
SHIFT: return ImGuiKey_LeftShift;
case ALLEGRO_KEY_ALT: return ImGuiKey_LeftAlt;
case ALLEGRO_KEY_LWIN: return ImGuiKey_LeftSuper;
case ALLEGRO_KEY_RCTRL: return ImGuiKey_RightCtrl;
case ALLEGRO_KEY_RSHIFT: return ImGuiKey_RightShift;
case ALLEGRO_KEY_ALTGR: return ImGuiKey_RightAlt;
case ALLEGRO_KEY_RWIN: return ImGuiKey_RightSuper;
case ALLEGRO_KEY_MENU: return ImGuiKey_Menu;
case ALLEGRO_KEY_0: return ImGuiKey_0;
case ALLEGRO_KEY_1: return ImGuiKey_1;
case ALLEGRO_KEY_2: return ImGuiKey_2;
case ALLEGRO_KEY_3: return ImGuiKey_3;
case ALLEGRO_KEY_4: return ImGuiKey_4;
case ALLEGRO_KEY_5:
|
return ImGuiKey_5;
|
case ALLEGRO_KEY_6: return ImGuiKey_6;
case ALLEGRO_KEY_7: return ImGuiKey_7;
case ALLEGRO_KEY_8: return ImGuiKey_8;
case ALLEGRO_KEY_9: return ImGuiKey_9;
case ALLEGRO_KEY_A: return ImGuiKey_A;
case ALLEGRO_KEY_B: return ImGuiKey_B;
case ALLEGRO_KEY_C: return ImGuiKey_C;
case ALLEGRO_KEY_D: return ImGuiKey_D;
case ALLEGRO_KEY_E: return ImGuiKey_E;
case ALLEGRO_KEY_F: return ImGuiKey_F;
case ALLEGRO_KEY_G: return ImGuiKey_G;
case ALLEGRO_KEY_H: return ImGuiKey_H;
case ALLEGRO_KEY_I: return ImGuiKey_I;
case ALLEGRO_KEY_J: return ImGuiKey_J;
case ALLEGRO_KEY_K: return ImGuiK
|
ast_based
|
<fim_prefix>
run3Calibration(imgpt[0], imgpt[1], imgpt[2], imageSize,
boardSize, squareSize, aspectRatio, flags|CALIB_FIX_K4|CALIB_FIX_K5,
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
cameraMatrix[2], distCoeffs[2],
R12, T12, R13, T13);
fs.open(outputFilename, FileStorage::WRITE);
fs << "cameraMatrix1" << cameraMatrix[0];
fs << "cameraMatrix2" << cameraMatrix[1];
fs << "cameraMatrix3" << cameraMatrix[2];
fs << "distCoeffs1" << distCoeffs[0];
fs << "distCoeffs2" << distCoeffs[1];
fs << "distCoeffs3" << distCoeffs[2];<fim_suffix> fs << "T13" << T13;
fs << "imageWidth" << imageSize.width;
fs << "imageHeight" << imageSize.height;
Mat Q;
// step 3: find rectification transforms
double ratio = rectify3Collinear(cameraMatrix[0], distCoeffs[0], cameraMatrix[1],
distCoeffs[1], cameraMatrix[2], distCoeffs[2],
imgpt[0], imgpt[2],
imageSize, R12, T12, R13, T13,
R[0], R[1], R[2], P[0], P[1], P[2], Q, -1.,
imageSize, 0, 0, CALIB_ZERO_DISPARITY);
Mat map1[3], map2[3];
fs << "R1" << R[0];
fs << "R2" << R[1];
fs << "R3" << R[2];
fs << "P1" << P[0];
fs << "P2" << P[1];
fs << "P3" << P[2];
<fim_middle>
fs << "R12" << R12;
fs << "T12" << T12;
fs << "R13" << R13;<fim_end>
|
run3Calibration(imgpt[0], imgpt[1], imgpt[2], imageSize,
boardSize, squareSize, aspectRatio, flags|CALIB_FIX_K4|CALIB_FIX_K5,
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
cameraMatrix[2], distCoeffs[2],
R12, T12, R13, T13);
fs.open(outputFilename, FileStorage::WRITE);
fs << "cameraMatrix1" << cameraMatrix[0];
fs << "cameraMatrix2" << cameraMatrix[1];
fs << "cameraMatrix3" << cameraMatrix[2];
fs << "distCoeffs1" << distCoeffs[0];
fs << "distCoeffs2" << distCoeffs[1];
fs << "distCoeffs3" << distCoeffs[2];
|
fs << "R12" << R12;
fs << "T12" << T12;
fs << "R13" << R13;
|
fs << "T13" << T13;
fs << "imageWidth" << imageSize.width;
fs << "imageHeight" << imageSize.height;
Mat Q;
// step 3: find rectification transforms
double ratio = rectify3Collinear(cameraMatrix[0], distCoeffs[0], cameraMatrix[1],
distCoeffs[1], cameraMatrix[2], distCoeffs[2],
imgpt[0], imgpt[2],
imageSize, R12, T12, R13, T13,
R[0], R[1], R[2], P[0], P[1], P[2], Q, -1.,
imageSize, 0, 0, CALIB_ZERO_DISPARITY);
Mat map1[3], map2[3];
fs << "R1" << R[0];
fs << "R2" << R[1];
fs << "R3" << R[2];
fs << "P1" << P[0];
fs << "P2" << P[1];
fs << "P3" << P[2];
|
random
|
<fim_prefix>_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
ae->actions[_accessibility_action(p_action)] = p_callable;
accesskit_node_add_action(ae->node, _accessibility_action(p_action));
}
void AccessibilityDriverAccessKit::accessibility_update_add_custom_action(const RID &p_id, int p_action_id, const String &p_action_description) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_action_description.is_empty()) <fim_suffix> else {
String cs_name = vformat("Custom Action %d", p_action_id);
accesskit_custom_action ca = accesskit_custom_action_new(p_action_id, cs_name.utf8().ptr());
accesskit_node_push_custom_action(ae->node, ca);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_table_row_count(const RID &p_id, int p_count) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_row_count(ae->node, p_count);
}
void AccessibilityDriverAccessKit::accessibility_update_set_table_c<fim_middle>{
accesskit_custom_action ca = accesskit_custom_action_new(p_action_id, p_action_description.utf8().ptr());
accesskit_node_push_custom_action(ae->node, ca);
}<fim_end>
|
_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
ae->actions[_accessibility_action(p_action)] = p_callable;
accesskit_node_add_action(ae->node, _accessibility_action(p_action));
}
void AccessibilityDriverAccessKit::accessibility_update_add_custom_action(const RID &p_id, int p_action_id, const String &p_action_description) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_action_description.is_empty())
|
{
accesskit_custom_action ca = accesskit_custom_action_new(p_action_id, p_action_description.utf8().ptr());
accesskit_node_push_custom_action(ae->node, ca);
}
|
else {
String cs_name = vformat("Custom Action %d", p_action_id);
accesskit_custom_action ca = accesskit_custom_action_new(p_action_id, cs_name.utf8().ptr());
accesskit_node_push_custom_action(ae->node, ca);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_table_row_count(const RID &p_id, int p_count) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_row_count(ae->node, p_count);
}
void AccessibilityDriverAccessKit::accessibility_update_set_table_c
|
ast_based
|
<fim_prefix> dsa->send_window_event(DisplayServer::WINDOW_EVENT_GO_BACK_REQUEST, true);
}
}
Input::get_singleton()->parse_input_event(ev);
}
void AndroidInputHandler::_cancel_all_touch() {
_parse_all_touch(false, true);
touch.clear();
}
void AndroidInputHandler::_parse_all_touch(bool p_pressed, bool p_canceled, bool p_double_tap) {
if (touch.size()) {
		// End all touches if any exist.
for (int i = 0; i < touch.size(); i++) {
Ref<InputEventScreenTouch> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_pressed(p_pressed);
ev->set_canceled(p_canceled);
ev->set_position(touch[i].pos);
ev->set_double_tap(p_double_tap);
Input::get_singleton()->parse_input_event(ev);
}
}
}
<fim_suffix>
void AndroidInputHandler::process_touch_event(int p_event, int p_pointer, const Vector<TouchPos> &p_points, bool p_double_tap) {
switch (p_event) {
case AMOTION_EVENT_ACTION_DOWN: { //gesture begin
// Release any remaining touches or mouse event
_release_mouse_event_info();
_release_all_touch();
touch.resize(p_points.size());
for (int i = 0; i < p_points.size(); i++) {
touch.write[i].id = p_points[i].id;
touch.write[i].pos = p_points[i].pos;
touch.write[i].pressure = p_points[i].pressure;
touch.write[i].tilt = p_points[i].tilt;
}
//send touch
_parse_all_touch(true, false, p_double_tap);
} break;
case AMOTION_EVENT_ACTION_MOVE: { //motion<fim_middle>void AndroidInputHandler::_release_all_touch() {
_parse_all_touch(false, false);
touch.clear();
}<fim_end>
|
dsa->send_window_event(DisplayServer::WINDOW_EVENT_GO_BACK_REQUEST, true);
}
}
Input::get_singleton()->parse_input_event(ev);
}
void AndroidInputHandler::_cancel_all_touch() {
_parse_all_touch(false, true);
touch.clear();
}
void AndroidInputHandler::_parse_all_touch(bool p_pressed, bool p_canceled, bool p_double_tap) {
if (touch.size()) {
		// End all touches if any exist.
for (int i = 0; i < touch.size(); i++) {
Ref<InputEventScreenTouch> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_pressed(p_pressed);
ev->set_canceled(p_canceled);
ev->set_position(touch[i].pos);
ev->set_double_tap(p_double_tap);
Input::get_singleton()->parse_input_event(ev);
}
}
}
|
void AndroidInputHandler::_release_all_touch() {
_parse_all_touch(false, false);
touch.clear();
}
|
void AndroidInputHandler::process_touch_event(int p_event, int p_pointer, const Vector<TouchPos> &p_points, bool p_double_tap) {
switch (p_event) {
case AMOTION_EVENT_ACTION_DOWN: { //gesture begin
// Release any remaining touches or mouse event
_release_mouse_event_info();
_release_all_touch();
touch.resize(p_points.size());
for (int i = 0; i < p_points.size(); i++) {
touch.write[i].id = p_points[i].id;
touch.write[i].pos = p_points[i].pos;
touch.write[i].pressure = p_points[i].pressure;
touch.write[i].tilt = p_points[i].tilt;
}
//send touch
_parse_all_touch(true, false, p_double_tap);
} break;
case AMOTION_EVENT_ACTION_MOVE: { //motion
|
random
|
<fim_prefix>nt end_frame = has_animation ? MAX(0, frames->get_frame_count(animation) - 1) : 0;
bool is_changed = frame != p_frame;
if (p_frame < 0) {
frame = 0;
} else if (has_animation && p_frame > end_frame) {
frame = end_frame;
} else {
frame = p_frame;
}
_calc_frame_speed_scale();
frame_progress = p_progress;
if (!is_changed) {
return; // No change, don't redraw.
}
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
void AnimatedSprite2D::set_speed_scale(float p_speed_scale) {
speed_scale = p_speed_scale;
}
float AnimatedSprite2D::get_speed_scale() const {
return speed_scale;
}
float AnimatedSprite2D::get_playing_speed() const {
if (!playing) {
return 0;
}
<fim_suffix>
}
void AnimatedSprite2D::set_centered(bool p_center) {
if (centered == p_center) {
return;
}
centered = p_center;
queue_redraw();
item_rect_changed();
}
bool AnimatedSprite2D::is_centered() const {
return centered;
}
void AnimatedSprite2D::set_offset(const Point2 &p_offset) {
if (offset == p_offset) {
return;
}
offset = p_offset;
queue_redraw();
item_rect_changed();
}
Point2 AnimatedSprite2D::get_offset() const {
return offset;
}
void AnimatedSprite2D::set_flip_h(bool p_flip) {
if (hflip == p_flip) {
return;
}
hflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_h() const {
return hflip;
}
void AnimatedSprite2D::set_flip_v(bool p_flip) {
if (<fim_middle>return speed_scale * custom_speed_scale;<fim_end>
|
nt end_frame = has_animation ? MAX(0, frames->get_frame_count(animation) - 1) : 0;
bool is_changed = frame != p_frame;
if (p_frame < 0) {
frame = 0;
} else if (has_animation && p_frame > end_frame) {
frame = end_frame;
} else {
frame = p_frame;
}
_calc_frame_speed_scale();
frame_progress = p_progress;
if (!is_changed) {
return; // No change, don't redraw.
}
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
void AnimatedSprite2D::set_speed_scale(float p_speed_scale) {
speed_scale = p_speed_scale;
}
float AnimatedSprite2D::get_speed_scale() const {
return speed_scale;
}
float AnimatedSprite2D::get_playing_speed() const {
if (!playing) {
return 0;
}
|
return speed_scale * custom_speed_scale;
|
}
void AnimatedSprite2D::set_centered(bool p_center) {
if (centered == p_center) {
return;
}
centered = p_center;
queue_redraw();
item_rect_changed();
}
bool AnimatedSprite2D::is_centered() const {
return centered;
}
void AnimatedSprite2D::set_offset(const Point2 &p_offset) {
if (offset == p_offset) {
return;
}
offset = p_offset;
queue_redraw();
item_rect_changed();
}
Point2 AnimatedSprite2D::get_offset() const {
return offset;
}
void AnimatedSprite2D::set_flip_h(bool p_flip) {
if (hflip == p_flip) {
return;
}
hflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_h() const {
return hflip;
}
void AnimatedSprite2D::set_flip_v(bool p_flip) {
if (
|
ast_based
|
<fim_prefix>ng on success, or -1 on failure
// - The output string is always null-terminated and cleared on failure
// - When retrieving a string, an extra byte must be allocated to account for the null terminator
// - GGUF array values are not supported by these functions
// Get metadata value as a string by key name
LLAMA_API int32_t llama_adapter_meta_val_str(const struct llama_adapter_lora * adapter, const char * key, char * buf, size_t buf_size);
// Get the number of metadata key/value pairs
LLAMA_API int32_t llama_adapter_meta_count(const struct llama_adapter_lora * adapter);
// Get metadata key name by index
LLAMA_API int32_t llama_adapter_meta_key_by_index(<fim_suffix>, int32_t i, char * buf, size_t buf_size);
// Get metadata value as a string by index
LLAMA_API int32_t llama_adapter_meta_val_str_by_index(const struct llama_adapter_lora * adapter, int32_t i, char * buf, size_t buf_size);
// Manually free a LoRA adapter
 // Note: loaded adapters will be freed when the associated model is deleted
LLAMA_API void llama_adapter_lora_free(struct llama_adapter_lora * adapter);
// Get the invocation tokens if the current lora is an alora
LLAMA_API uint64_t llama_adapter_get_alora_n_invocation_tokens(const struct llama_adapter_lora * adapter);
LLAMA_API const llama_token * llama_adapter_get_alora_invocation_tokens (cons<fim_middle>const struct llama_adapter_lora * adapter<fim_end>
|
ng on success, or -1 on failure
// - The output string is always null-terminated and cleared on failure
// - When retrieving a string, an extra byte must be allocated to account for the null terminator
// - GGUF array values are not supported by these functions
// Get metadata value as a string by key name
LLAMA_API int32_t llama_adapter_meta_val_str(const struct llama_adapter_lora * adapter, const char * key, char * buf, size_t buf_size);
// Get the number of metadata key/value pairs
LLAMA_API int32_t llama_adapter_meta_count(const struct llama_adapter_lora * adapter);
// Get metadata key name by index
LLAMA_API int32_t llama_adapter_meta_key_by_index(
|
const struct llama_adapter_lora * adapter
|
, int32_t i, char * buf, size_t buf_size);
// Get metadata value as a string by index
LLAMA_API int32_t llama_adapter_meta_val_str_by_index(const struct llama_adapter_lora * adapter, int32_t i, char * buf, size_t buf_size);
// Manually free a LoRA adapter
    // Note: loaded adapters will be freed when the associated model is deleted
LLAMA_API void llama_adapter_lora_free(struct llama_adapter_lora * adapter);
// Get the invocation tokens if the current lora is an alora
LLAMA_API uint64_t llama_adapter_get_alora_n_invocation_tokens(const struct llama_adapter_lora * adapter);
LLAMA_API const llama_token * llama_adapter_get_alora_invocation_tokens (cons
|
ast_based
|
<fim_prefix> mutex.unlock();
}
void AudioDriverDummy::set_use_threads(bool p_use_threads) {
use_threads = p_use_threads;
}
void AudioDriverDummy::set_speaker_mode(SpeakerMode p_mode) {
speaker_mode = p_mode;
}
void AudioDriverDummy::set_mix_rate(int p_rate) {
mix_rate = p_rate;
}
uint32_t AudioDriverDummy::get_channels() const {
static const int channels_for_mode[4] = { 2, 4, 8, 16 };
return channels_for_mode[speaker_mode];
}
void AudioDriverDummy::mix_audio(int p_frames, int32_t *p_buffer) {
ERR_FAIL_COND(!active.is_set()); // If not active, should not mix.
ERR_FAIL_COND(use_threads == true); // If using threads, this will not work well.
uint32_t todo = p_frames;
while (todo) {<fim_suffix> lock();
audio_server_process(to_mix, samples_in);
unlock();
uint32_t total_samples = to_mix * channels;
for (uint32_t i = 0; i < total_samples; i++) {
p_buffer[i] = samples_in[i];
}
todo -= to_mix;
p_buffer += total_samples;
}
}
void AudioDriverDummy::finish() {
if (use_threads) {
exit_thread.set();
if (thread.is_started()) {
thread.wait_to_finish();
}
}
if (samples_in) {
memdelete_arr(samples_in);
}
}
AudioDriverDummy::AudioDriverDummy() {
singleton = this;
}
<fim_middle> uint32_t to_mix = MIN(buffer_frames, todo);<fim_end>
|
mutex.unlock();
}
void AudioDriverDummy::set_use_threads(bool p_use_threads) {
use_threads = p_use_threads;
}
void AudioDriverDummy::set_speaker_mode(SpeakerMode p_mode) {
speaker_mode = p_mode;
}
void AudioDriverDummy::set_mix_rate(int p_rate) {
mix_rate = p_rate;
}
uint32_t AudioDriverDummy::get_channels() const {
static const int channels_for_mode[4] = { 2, 4, 8, 16 };
return channels_for_mode[speaker_mode];
}
void AudioDriverDummy::mix_audio(int p_frames, int32_t *p_buffer) {
ERR_FAIL_COND(!active.is_set()); // If not active, should not mix.
ERR_FAIL_COND(use_threads == true); // If using threads, this will not work well.
uint32_t todo = p_frames;
while (todo) {
|
uint32_t to_mix = MIN(buffer_frames, todo);
|
lock();
audio_server_process(to_mix, samples_in);
unlock();
uint32_t total_samples = to_mix * channels;
for (uint32_t i = 0; i < total_samples; i++) {
p_buffer[i] = samples_in[i];
}
todo -= to_mix;
p_buffer += total_samples;
}
}
void AudioDriverDummy::finish() {
if (use_threads) {
exit_thread.set();
if (thread.is_started()) {
thread.wait_to_finish();
}
}
if (samples_in) {
memdelete_arr(samples_in);
}
}
AudioDriverDummy::AudioDriverDummy() {
singleton = this;
}
|
random
|
<fim_prefix>alibData->allCharucoCorners.end(); ++it)
for(int l = 0; l < (*it).size[0]; l++) {
int i = (int)((*it).at<float>(l, 0) / xGridStep);
int j = (int)((*it).at<float>(l, 1) / yGridStep);
pointsInCell[i*gridSize + j]++;
}
cv::Mat mean, stdDev;
cv::meanStdDev(pointsInCell, mean, stdDev);
return mean.at<double>(0) / (stdDev.at<double>(0) + 1e-7);
}
calib::calibController::calibController()
{
mCalibFlags = 0;
}
calib::calibController::calibController(cv::Ptr<calib::calibrationData> data, int initialFlags, bool autoTuning, int minFramesNum) :
mCalibData(data)
{
mCalibFlags = initialFlags;
mNeedTuning = autoTuning;
<fim_suffix>
mCoverageQualityState = false;
}
void calib::calibController::updateState()
{
if(mCalibData->cameraMatrix.total()) {
const double relErrEps = 0.05;
bool fConfState = false, cConfState = false, dConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(0) / mCalibData->cameraMatrix.at<double>(0,0) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(1) / mCalibData->cameraMatrix.at<double>(1,1) < relErrEps)
fConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(2) / mCalibData->cameraMatrix.at<double>(0,2) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(3) / mCa<fim_middle>mMinFramesNum = minFramesNum;
mConfIntervalsState = false;<fim_end>
|
alibData->allCharucoCorners.end(); ++it)
for(int l = 0; l < (*it).size[0]; l++) {
int i = (int)((*it).at<float>(l, 0) / xGridStep);
int j = (int)((*it).at<float>(l, 1) / yGridStep);
pointsInCell[i*gridSize + j]++;
}
cv::Mat mean, stdDev;
cv::meanStdDev(pointsInCell, mean, stdDev);
return mean.at<double>(0) / (stdDev.at<double>(0) + 1e-7);
}
calib::calibController::calibController()
{
mCalibFlags = 0;
}
calib::calibController::calibController(cv::Ptr<calib::calibrationData> data, int initialFlags, bool autoTuning, int minFramesNum) :
mCalibData(data)
{
mCalibFlags = initialFlags;
mNeedTuning = autoTuning;
|
mMinFramesNum = minFramesNum;
mConfIntervalsState = false;
|
mCoverageQualityState = false;
}
void calib::calibController::updateState()
{
if(mCalibData->cameraMatrix.total()) {
const double relErrEps = 0.05;
bool fConfState = false, cConfState = false, dConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(0) / mCalibData->cameraMatrix.at<double>(0,0) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(1) / mCalibData->cameraMatrix.at<double>(1,1) < relErrEps)
fConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(2) / mCalibData->cameraMatrix.at<double>(0,2) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(3) / mCa
|
ast_based
|
<fim_prefix>ermissions and
// limitations under the License.
#include "errcode.h" // for ASSERT_HOST
#include "helpers.h" // for copy_string
#include "tprintf.h" // for tprintf
#include <tesseract/baseapi.h>
#include <tesseract/renderer.h>
#include <memory>
#include <sstream> // for std::stringstream
namespace tesseract {
/// Add coordinates to specified TextBlock, TextLine or String bounding box.
/// Add word confidence if adding to a String bounding box.
///
static void AddBoxToAlto(const ResultIterator *it, PageIteratorLevel level,
std::stringstream &alto_str) {
int left, top, right, bottom;
it->BoundingBox(level, &left, &top, &right, &bottom);
int hpos = left;
<fim_suffix>
int height = bottom - top;
int width = right - left;
alto_str << " HPOS=\"" << hpos << "\"";
alto_str << " VPOS=\"" << vpos << "\"";
alto_str << " WIDTH=\"" << width << "\"";
alto_str << " HEIGHT=\"" << height << "\"";
if (level == RIL_WORD) {
int wc = it->Confidence(RIL_WORD);
alto_str << " WC=\"0." << wc << "\"";
} else {
alto_str << ">";
}
}
static std::string GetID(const char *prefix, int page_number, int counter) {
std::stringstream idstr;
// IDs will only have the counter for the first page to keep them consistent
// with the IDs assigned before this change was made.
// From the second page on, IDs will also contain the page number to make them<fim_middle>int vpos = top;<fim_end>
|
ermissions and
// limitations under the License.
#include "errcode.h" // for ASSERT_HOST
#include "helpers.h" // for copy_string
#include "tprintf.h" // for tprintf
#include <tesseract/baseapi.h>
#include <tesseract/renderer.h>
#include <memory>
#include <sstream> // for std::stringstream
namespace tesseract {
/// Add coordinates to specified TextBlock, TextLine or String bounding box.
/// Add word confidence if adding to a String bounding box.
///
static void AddBoxToAlto(const ResultIterator *it, PageIteratorLevel level,
std::stringstream &alto_str) {
int left, top, right, bottom;
it->BoundingBox(level, &left, &top, &right, &bottom);
int hpos = left;
|
int vpos = top;
|
int height = bottom - top;
int width = right - left;
alto_str << " HPOS=\"" << hpos << "\"";
alto_str << " VPOS=\"" << vpos << "\"";
alto_str << " WIDTH=\"" << width << "\"";
alto_str << " HEIGHT=\"" << height << "\"";
if (level == RIL_WORD) {
int wc = it->Confidence(RIL_WORD);
alto_str << " WC=\"0." << wc << "\"";
} else {
alto_str << ">";
}
}
static std::string GetID(const char *prefix, int page_number, int counter) {
std::stringstream idstr;
// IDs will only have the counter for the first page to keep them consistent
// with the IDs assigned before this change was made.
// From the second page on, IDs will also contain the page number to make them
|
ast_based
|
<fim_prefix>{
return false;
}
if (is_filtered) {
String path = String(animation->track_get_path(p_track_index));
if (root && root->has_node(path)) {
Node *node = root->get_node(path);
if (!node) {
return false; // No node, no filter.
}
if (!EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) {
return false; // Skip track due to not selected.
}
}
}
return true;
}
// Check if the curves for a track are displayed in the editor (not hidden). Includes the check on the track visibility.
bool AnimationBezierTrackEdit::_is_track_curves_displayed(int p_track_index) {
 // Is the track visible in the editor?
if (!_is_track_displayed(p_track_index)) {
<fim_suffix>
}
// And curves visible?
if (hidden_tracks.has(p_track_index)) {
return false;
}
return true;
}
Ref<Animation> AnimationBezierTrackEdit::get_animation() const {
return animation;
}
void AnimationBezierTrackEdit::set_animation_and_track(const Ref<Animation> &p_animation, int p_track, bool p_read_only) {
animation = p_animation;
read_only = p_read_only;
selected_track = p_track;
queue_redraw();
}
Size2 AnimationBezierTrackEdit::get_minimum_size() const {
return Vector2(1, 1);
}
Control::CursorShape AnimationBezierTrackEdit::get_cursor_shape(const Point2 &p_pos) const {
// Box selecting or moving a handle
if (box_selecting || Math::abs(moving_handle) == 1) {
return get_d<fim_middle>return false;<fim_end>
|
{
return false;
}
if (is_filtered) {
String path = String(animation->track_get_path(p_track_index));
if (root && root->has_node(path)) {
Node *node = root->get_node(path);
if (!node) {
return false; // No node, no filter.
}
if (!EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) {
return false; // Skip track due to not selected.
}
}
}
return true;
}
// Check if the curves for a track are displayed in the editor (not hidden). Includes the check on the track visibility.
bool AnimationBezierTrackEdit::_is_track_curves_displayed(int p_track_index) {
 // Is the track visible in the editor?
if (!_is_track_displayed(p_track_index)) {
|
return false;
|
}
// And curves visible?
if (hidden_tracks.has(p_track_index)) {
return false;
}
return true;
}
Ref<Animation> AnimationBezierTrackEdit::get_animation() const {
return animation;
}
void AnimationBezierTrackEdit::set_animation_and_track(const Ref<Animation> &p_animation, int p_track, bool p_read_only) {
animation = p_animation;
read_only = p_read_only;
selected_track = p_track;
queue_redraw();
}
Size2 AnimationBezierTrackEdit::get_minimum_size() const {
return Vector2(1, 1);
}
Control::CursorShape AnimationBezierTrackEdit::get_cursor_shape(const Point2 &p_pos) const {
// Box selecting or moving a handle
if (box_selecting || Math::abs(moving_handle) == 1) {
return get_d
|
ast_based
|
<fim_prefix> rq_data = String::utf8(p_request->data.value.value);
} break;
case ACCESSKIT_ACTION_DATA_NUMERIC_VALUE: {
rq_data = p_request->data.value.numeric_value;
} break;
case ACCESSKIT_ACTION_DATA_SCROLL_HINT: {
switch (p_request->data.value.scroll_hint) {
case ACCESSKIT_SCROLL_HINT_TOP_LEFT: {
rq_data = DisplayServer::SCROLL_HINT_TOP_LEFT;
} break;
case ACCESSKIT_SCROLL_HINT_BOTTOM_RIGHT: {
rq_data = DisplayServer::SCROLL_HINT_BOTTOM_RIGHT;
} break;
case ACCESSKIT_SCROLL_HINT_TOP_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_TOP_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_BOTTOM_EDGE: {
<fim_suffix>
} break;
case ACCESSKIT_SCROLL_HINT_LEFT_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_LEFT_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_RIGHT_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_RIGHT_EDGE;
} break;
default:
break;
}
} break;
case ACCESSKIT_ACTION_DATA_SCROLL_UNIT: {
if (p_request->data.value.scroll_unit == ACCESSKIT_SCROLL_UNIT_ITEM) {
rq_data = DisplayServer::SCROLL_UNIT_ITEM;
} else if (p_request->data.value.scroll_unit == ACCESSKIT_SCROLL_UNIT_PAGE) {
rq_data = DisplayServer::SCROLL_UNIT_PAGE;
}
} break;
case ACCESSKIT_ACTION_DATA_SCROLL_TO_POINT: {
rq_<fim_middle>rq_data = DisplayServer::SCROLL_HINT_BOTTOM_EDGE;<fim_end>
|
rq_data = String::utf8(p_request->data.value.value);
} break;
case ACCESSKIT_ACTION_DATA_NUMERIC_VALUE: {
rq_data = p_request->data.value.numeric_value;
} break;
case ACCESSKIT_ACTION_DATA_SCROLL_HINT: {
switch (p_request->data.value.scroll_hint) {
case ACCESSKIT_SCROLL_HINT_TOP_LEFT: {
rq_data = DisplayServer::SCROLL_HINT_TOP_LEFT;
} break;
case ACCESSKIT_SCROLL_HINT_BOTTOM_RIGHT: {
rq_data = DisplayServer::SCROLL_HINT_BOTTOM_RIGHT;
} break;
case ACCESSKIT_SCROLL_HINT_TOP_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_TOP_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_BOTTOM_EDGE: {
|
rq_data = DisplayServer::SCROLL_HINT_BOTTOM_EDGE;
|
} break;
case ACCESSKIT_SCROLL_HINT_LEFT_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_LEFT_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_RIGHT_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_RIGHT_EDGE;
} break;
default:
break;
}
} break;
case ACCESSKIT_ACTION_DATA_SCROLL_UNIT: {
if (p_request->data.value.scroll_unit == ACCESSKIT_SCROLL_UNIT_ITEM) {
rq_data = DisplayServer::SCROLL_UNIT_ITEM;
} else if (p_request->data.value.scroll_unit == ACCESSKIT_SCROLL_UNIT_PAGE) {
rq_data = DisplayServer::SCROLL_UNIT_PAGE;
}
} break;
case ACCESSKIT_ACTION_DATA_SCROLL_TO_POINT: {
rq_
|
ast_based
|
<fim_prefix>> ctx_map;
auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
auto it = ctx_map.find(buft);
if (it == ctx_map.end()) {
ggml_init_params params = {
/*.mem_size =*/ hparams.n_layer*ggml_tensor_overhead(),
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
};
ggml_context * ctx = ggml_init(params);
if (!ctx) {
return nullptr;
}
ctx_map[buft] = ctx;
ctxs.emplace_back(ctx);
return ctx;
}
return it->second;
};
// make tensors
tensors.reserve(hparams.n_layer);
<fim_suffix> // there's never a tensor for layer 0
for (size_t il = 1; il < hparams.n_layer; il++) {
ggml_backend_buffer_type_t buft = model.select_buft(il);
ggml_context * ctx = ctx_for_buft(buft);
if (!ctx) {
LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
return false;
}
ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
tensors.push_back(tensor);
}
// allocate tensors / buffers and zero
bufs.reserve(ctx_map.size());
for (auto it : ctx_map) {
ggml_backend_buffer_type_t buft = it.first;
ggml_context * ctx = it.second;
ggml_ba<fim_middle>tensors.push_back(nullptr);<fim_end>
|
> ctx_map;
auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
auto it = ctx_map.find(buft);
if (it == ctx_map.end()) {
ggml_init_params params = {
/*.mem_size =*/ hparams.n_layer*ggml_tensor_overhead(),
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
};
ggml_context * ctx = ggml_init(params);
if (!ctx) {
return nullptr;
}
ctx_map[buft] = ctx;
ctxs.emplace_back(ctx);
return ctx;
}
return it->second;
};
// make tensors
tensors.reserve(hparams.n_layer);
|
tensors.push_back(nullptr);
|
// there's never a tensor for layer 0
for (size_t il = 1; il < hparams.n_layer; il++) {
ggml_backend_buffer_type_t buft = model.select_buft(il);
ggml_context * ctx = ctx_for_buft(buft);
if (!ctx) {
LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
return false;
}
ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
tensors.push_back(tensor);
}
// allocate tensors / buffers and zero
bufs.reserve(ctx_map.size());
for (auto it : ctx_map) {
ggml_backend_buffer_type_t buft = it.first;
ggml_context * ctx = it.second;
ggml_ba
|
ast_based
|
<fim_prefix> for(size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
bool success = false;
tokenized_prompt t = tokenized_prompts[i];
cb_data.n_layers = n_layers;
cb_data.n_tokens = t.max_seq_len;
printf("Evaluating prompt[%d/%d]: \"%s\" - \"%s\" (%d tokens)\n",
(int) i+1, (int) ctx_train.positive_entries.size(),
tokens_to_str(ctx, t.tokens_pos.cbegin(), t.tokens_pos.cend()).c_str(),
tokens_to_str(ctx, t.tokens_neg.cbegin(), t.tokens_neg.cend()).c_str(),
(int) t.max_seq_len);
cb_data.is_eval_pos = true;
success = get_hidden_layers(ctx, t.tokens_pos);
if (!success) break;
<fim_suffix>
success = get_hidden_layers(ctx, t.tokens_neg);
if (!success) break;
// calculate diff and remove all zero rows
auto v_diff_filtered = cb_data.calc_diff();
// save & concat the filtered v_diff to ctx_train
ctx_train.concat_diff_tmp(v_diff_filtered);
// reset for next iteration
cb_data.reset();
}
 // done with the model, we can now free it to gain some memory
 printf("Done evaluating prompts, unload model...\n");
bool use_pca = params.cvector_dimre_method == DIMRE_METHOD_PCA;
// prepare ctx_train for PCA
ctx_train.build_v_diff(use_pca);
if (use_pca) {
// run PCA
PCA::pca_params<fim_middle>cb_data.is_eval_pos = false;<fim_end>
|
for(size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
bool success = false;
tokenized_prompt t = tokenized_prompts[i];
cb_data.n_layers = n_layers;
cb_data.n_tokens = t.max_seq_len;
printf("Evaluating prompt[%d/%d]: \"%s\" - \"%s\" (%d tokens)\n",
(int) i+1, (int) ctx_train.positive_entries.size(),
tokens_to_str(ctx, t.tokens_pos.cbegin(), t.tokens_pos.cend()).c_str(),
tokens_to_str(ctx, t.tokens_neg.cbegin(), t.tokens_neg.cend()).c_str(),
(int) t.max_seq_len);
cb_data.is_eval_pos = true;
success = get_hidden_layers(ctx, t.tokens_pos);
if (!success) break;
|
cb_data.is_eval_pos = false;
|
success = get_hidden_layers(ctx, t.tokens_neg);
if (!success) break;
// calculate diff and remove all zero rows
auto v_diff_filtered = cb_data.calc_diff();
// save & concat the filtered v_diff to ctx_train
ctx_train.concat_diff_tmp(v_diff_filtered);
// reset for next iteration
cb_data.reset();
}
 // done with the model, we can now free it to gain some memory
 printf("Done evaluating prompts, unload model...\n");
bool use_pca = params.cvector_dimre_method == DIMRE_METHOD_PCA;
// prepare ctx_train for PCA
ctx_train.build_v_diff(use_pca);
if (use_pca) {
// run PCA
PCA::pca_params
|
ast_based
|
<fim_prefix>name of the image file.
begin_document = true;
return true;
}
///
/// Append the ALTO XML for the layout of the image
///
bool TessAltoRenderer::AddImageHandler(TessBaseAPI *api) {
if (begin_document) {
AppendString(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
"<alto xmlns=\"http://www.loc.gov/standards/alto/ns-v3#\" "
"xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" "
"xsi:schemaLocation=\"http://www.loc.gov/standards/alto/ns-v3# "
"http://www.loc.gov/alto/v3/alto-3-0.xsd\">\n"
"\t<Description>\n"
"\t\t<MeasurementUnit>pixel</MeasurementUnit>\n"
"\t\t<sourceImageInformation>\n"
"\t\t\t<fileName>");
AppendString(<fim_suffix>);
AppendString(
"</fileName>\n"
"\t\t</sourceImageInformation>\n"
"\t\t<OCRProcessing ID=\"OCR_0\">\n"
"\t\t\t<ocrProcessingStep>\n"
"\t\t\t\t<processingSoftware>\n"
"\t\t\t\t\t<softwareName>Tesseract</softwareName>\n"
"\t\t\t\t\t<softwareVersion>");
AppendString(TessBaseAPI::Version());
AppendString(
"</softwareVersion>\n"
"\t\t\t\t</processingSoftware>\n"
"\t\t\t</ocrProcessingStep>\n"
"\t\t</OCRProcessing>\n"
"\t</Description>\n"
"\t<Layout>\n");
begin_document = false;
}
const std::unique_ptr<const char[]> text(api->GetAltoText(imagenum()));
if (text == nullptr) {
return false;
}
<fim_middle>api->GetInputName()<fim_end>
|
name of the image file.
begin_document = true;
return true;
}
///
/// Append the ALTO XML for the layout of the image
///
bool TessAltoRenderer::AddImageHandler(TessBaseAPI *api) {
if (begin_document) {
AppendString(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
"<alto xmlns=\"http://www.loc.gov/standards/alto/ns-v3#\" "
"xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" "
"xsi:schemaLocation=\"http://www.loc.gov/standards/alto/ns-v3# "
"http://www.loc.gov/alto/v3/alto-3-0.xsd\">\n"
"\t<Description>\n"
"\t\t<MeasurementUnit>pixel</MeasurementUnit>\n"
"\t\t<sourceImageInformation>\n"
"\t\t\t<fileName>");
AppendString(
|
api->GetInputName()
|
);
AppendString(
"</fileName>\n"
"\t\t</sourceImageInformation>\n"
"\t\t<OCRProcessing ID=\"OCR_0\">\n"
"\t\t\t<ocrProcessingStep>\n"
"\t\t\t\t<processingSoftware>\n"
"\t\t\t\t\t<softwareName>Tesseract</softwareName>\n"
"\t\t\t\t\t<softwareVersion>");
AppendString(TessBaseAPI::Version());
AppendString(
"</softwareVersion>\n"
"\t\t\t\t</processingSoftware>\n"
"\t\t\t</ocrProcessingStep>\n"
"\t\t</OCRProcessing>\n"
"\t</Description>\n"
"\t<Layout>\n");
begin_document = false;
}
const std::unique_ptr<const char[]> text(api->GetAltoText(imagenum()));
if (text == nullptr) {
return false;
}
|
ast_based
|
<fim_prefix>tracks.has(track)) {
hidden_tracks.erase(track);
} else {
hidden_tracks.insert(track);
if (selected_track == track) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (!hidden_tracks.has(i) && animation->track_get_type(i) == Animation::TrackType::TYPE_BEZIER) {
set_animation_and_track(animation, i, read_only);
break;
}
}
}
}
Vector<int> visible_tracks;
for (int i = 0; i < animation->get_track_count(); ++i) {
if (!hidden_tracks.has(i) && animation->track_get_type(i) == Animation::TrackType::TYPE_BEZIER) {
visible_tracks.push_back(i);
}
}
if (<fim_suffix> == 1) {
solo_track = visible_tracks[0];
} else {
solo_track = -1;
}
queue_redraw();
return;
} else if (I.key == SOLO_ICON) {
if (solo_track == track) {
solo_track = -1;
hidden_tracks.clear();
} else {
if (hidden_tracks.has(track)) {
hidden_tracks.erase(track);
}
for (int i = 0; i < animation->get_track_count(); ++i) {
if (animation->track_get_type(i) == Animation::TrackType::TYPE_BEZIER) {
if (i != track && !hidden_tracks.has(i)) {
hidden_tracks.insert(i);
}
}
}
set_animation_and_track(animation, track, read_only);
solo_track =<fim_middle>visible_tracks.size()<fim_end>
|
tracks.has(track)) {
hidden_tracks.erase(track);
} else {
hidden_tracks.insert(track);
if (selected_track == track) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (!hidden_tracks.has(i) && animation->track_get_type(i) == Animation::TrackType::TYPE_BEZIER) {
set_animation_and_track(animation, i, read_only);
break;
}
}
}
}
Vector<int> visible_tracks;
for (int i = 0; i < animation->get_track_count(); ++i) {
if (!hidden_tracks.has(i) && animation->track_get_type(i) == Animation::TrackType::TYPE_BEZIER) {
visible_tracks.push_back(i);
}
}
if (
|
visible_tracks.size()
|
== 1) {
solo_track = visible_tracks[0];
} else {
solo_track = -1;
}
queue_redraw();
return;
} else if (I.key == SOLO_ICON) {
if (solo_track == track) {
solo_track = -1;
hidden_tracks.clear();
} else {
if (hidden_tracks.has(track)) {
hidden_tracks.erase(track);
}
for (int i = 0; i < animation->get_track_count(); ++i) {
if (animation->track_get_type(i) == Animation::TrackType::TYPE_BEZIER) {
if (i != track && !hidden_tracks.has(i)) {
hidden_tracks.insert(i);
}
}
}
set_animation_and_track(animation, track, read_only);
solo_track =
|
ast_based
|
<fim_prefix> ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_background_color(ae->node, p_color.to_rgba32());
}
void AccessibilityDriverAccessKit::accessibility_update_set_foreground_color(const RID &p_id, const Color &p_color) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
<fim_suffix> _ensure_node(p_id, ae);
accesskit_node_set_foreground_color(ae->node, p_color.to_rgba32());
}
Error AccessibilityDriverAccessKit::init() {
#ifdef ACCESSKIT_DYNAMIC
#ifdef DEBUG_ENABLED
int dylibloader_verbose = 1;
#else
int dylibloader_verbose = 0;
#endif
void *library_handle = nullptr;
String path;
String arch = Engine::get_singleton()->get_architecture_name();
#ifdef LINUXBSD_ENABLED
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit." + arch + ".so");
if (!FileAccess::exists(path)) {
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../lib").path_join("libaccesskit." + arch + ".so");
}<fim_middle> AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);<fim_end>
|
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_background_color(ae->node, p_color.to_rgba32());
}
void AccessibilityDriverAccessKit::accessibility_update_set_foreground_color(const RID &p_id, const Color &p_color) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
|
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
|
_ensure_node(p_id, ae);
accesskit_node_set_foreground_color(ae->node, p_color.to_rgba32());
}
Error AccessibilityDriverAccessKit::init() {
#ifdef ACCESSKIT_DYNAMIC
#ifdef DEBUG_ENABLED
int dylibloader_verbose = 1;
#else
int dylibloader_verbose = 0;
#endif
void *library_handle = nullptr;
String path;
String arch = Engine::get_singleton()->get_architecture_name();
#ifdef LINUXBSD_ENABLED
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit." + arch + ".so");
if (!FileAccess::exists(path)) {
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../lib").path_join("libaccesskit." + arch + ".so");
}
|
random
|
<fim_prefix>RID &p_id, const RID &p_child_id) override;
void accessibility_update_add_related_controls(const RID &p_id, const RID &p_related_id) override;
void accessibility_update_add_related_details(const RID &p_id, const RID &p_related_id) override;
void accessibility_update_add_related_described_by(const RID &p_id, const RID &p_related_id) override;
void accessibility_update_add_related_flow_to(const RID &p_id, const RID &p_related_id) override;
void accessibility_update_add_related_labeled_by(const RID &p_id, const RID &p_related_id) override;
void accessibility_update_add_related_radio_group(const RID &p_id, const RID &p_related_id) override;
void accessibility_update_set_active_descendant(<fim_suffix>, const RID &p_other_id) override;
void accessibility_update_set_next_on_line(const RID &p_id, const RID &p_other_id) override;
void accessibility_update_set_previous_on_line(const RID &p_id, const RID &p_other_id) override;
void accessibility_update_set_member_of(const RID &p_id, const RID &p_group_id) override;
void accessibility_update_set_in_page_link_target(const RID &p_id, const RID &p_other_id) override;
void accessibility_update_set_error_message(const RID &p_id, const RID &p_other_id) override;
void accessibility_update_set_live(const RID &p_id, DisplayServer::AccessibilityLiveMode p_live) override;
void accessibility_update_add_action(const RID &p_id, DisplayServer::Accessib<fim_middle>const RID &p_id<fim_end>
|
RID &p_id, const RID &p_child_id) override;
void accessibility_update_add_related_controls(const RID &p_id, const RID &p_related_id) override;
void accessibility_update_add_related_details(const RID &p_id, const RID &p_related_id) override;
void accessibility_update_add_related_described_by(const RID &p_id, const RID &p_related_id) override;
void accessibility_update_add_related_flow_to(const RID &p_id, const RID &p_related_id) override;
void accessibility_update_add_related_labeled_by(const RID &p_id, const RID &p_related_id) override;
void accessibility_update_add_related_radio_group(const RID &p_id, const RID &p_related_id) override;
void accessibility_update_set_active_descendant(
|
const RID &p_id
|
, const RID &p_other_id) override;
void accessibility_update_set_next_on_line(const RID &p_id, const RID &p_other_id) override;
void accessibility_update_set_previous_on_line(const RID &p_id, const RID &p_other_id) override;
void accessibility_update_set_member_of(const RID &p_id, const RID &p_group_id) override;
void accessibility_update_set_in_page_link_target(const RID &p_id, const RID &p_other_id) override;
void accessibility_update_set_error_message(const RID &p_id, const RID &p_other_id) override;
void accessibility_update_set_live(const RID &p_id, DisplayServer::AccessibilityLiveMode p_live) override;
void accessibility_update_add_action(const RID &p_id, DisplayServer::Accessib
|
ast_based
|
<fim_prefix> tokens_to_str(ctx, t.tokens_pos.cbegin(), t.tokens_pos.cend()).c_str(),
tokens_to_str(ctx, t.tokens_neg.cbegin(), t.tokens_neg.cend()).c_str(),
(int) t.max_seq_len);
cb_data.is_eval_pos = true;
success = get_hidden_layers(ctx, t.tokens_pos);
if (!success) break;
cb_data.is_eval_pos = false;
success = get_hidden_layers(ctx, t.tokens_neg);
if (!success) break;
// calculate diff and remove all zero rows
auto v_diff_filtered = cb_data.calc_diff();
// save & concat the filtered v_diff to ctx_train
ctx_train.concat_diff_tmp(v_diff_filtered);
// reset for next iteration
<fim_suffix>
}
 // done with the model, we can now free it to gain some memory
 printf("Done evaluating prompts, unload model...\n");
bool use_pca = params.cvector_dimre_method == DIMRE_METHOD_PCA;
// prepare ctx_train for PCA
ctx_train.build_v_diff(use_pca);
if (use_pca) {
// run PCA
PCA::pca_params pca_params;
pca_params.n_threads = params.cpuparams.n_threads;
pca_params.n_batch = params.n_pca_batch;
pca_params.n_iterations = params.n_pca_iterations;
PCA::run_pca(pca_params, ctx_train.v_diff, ctx_train.v_final);
} else {
// run mean
mean::run(ctx_train.v_diff, ctx_train.v_final);
}
// <fim_middle>cb_data.reset();<fim_end>
|
tokens_to_str(ctx, t.tokens_pos.cbegin(), t.tokens_pos.cend()).c_str(),
tokens_to_str(ctx, t.tokens_neg.cbegin(), t.tokens_neg.cend()).c_str(),
(int) t.max_seq_len);
cb_data.is_eval_pos = true;
success = get_hidden_layers(ctx, t.tokens_pos);
if (!success) break;
cb_data.is_eval_pos = false;
success = get_hidden_layers(ctx, t.tokens_neg);
if (!success) break;
// calculate diff and remove all zero rows
auto v_diff_filtered = cb_data.calc_diff();
// save & concat the filtered v_diff to ctx_train
ctx_train.concat_diff_tmp(v_diff_filtered);
// reset for next iteration
|
cb_data.reset();
|
}
 // done with the model, we can now free it to gain some memory
 printf("Done evaluating prompts, unload model...\n");
bool use_pca = params.cvector_dimre_method == DIMRE_METHOD_PCA;
// prepare ctx_train for PCA
ctx_train.build_v_diff(use_pca);
if (use_pca) {
// run PCA
PCA::pca_params pca_params;
pca_params.n_threads = params.cpuparams.n_threads;
pca_params.n_batch = params.n_pca_batch;
pca_params.n_iterations = params.n_pca_iterations;
PCA::run_pca(pca_params, ctx_train.v_diff, ctx_train.v_final);
} else {
// run mean
mean::run(ctx_train.v_diff, ctx_train.v_final);
}
//
|
ast_based
|
<fim_prefix> ImGui_ImplAllegro5_CreateDeviceObjects();
// Setup display size (every frame to accommodate for window resizing)
ImGuiIO& io = ImGui::GetIO();
int w, h;
w = al_get_display_width(bd->Display);
h = al_get_display_height(bd->Display);
io.DisplaySize = ImVec2((float)w, (float)h);
// Setup time step
double current_time = al_get_time();
io.DeltaTime = bd->Time > 0.0 ? (float)(current_time - bd->Time) : (float)(1.0f / 60.0f);
bd->Time = current_time;
// Allegro 5 doesn't receive PrintScreen under Windows
#ifdef _WIN32
io.AddKeyEvent(ImGuiKey_PrintScreen, (::GetAsyncKeyState(VK_SNAPSHOT) & 0x8000) != 0);
#endif
<fim_suffix> ImGui_ImplAllegro5_UpdateMouseCursor();
}
//-----------------------------------------------------------------------------
#endif // #ifndef IMGUI_DISABLE
<fim_middle> // Setup mouse cursor shape<fim_end>
|
ImGui_ImplAllegro5_CreateDeviceObjects();
// Setup display size (every frame to accommodate for window resizing)
ImGuiIO& io = ImGui::GetIO();
int w, h;
w = al_get_display_width(bd->Display);
h = al_get_display_height(bd->Display);
io.DisplaySize = ImVec2((float)w, (float)h);
// Setup time step
double current_time = al_get_time();
io.DeltaTime = bd->Time > 0.0 ? (float)(current_time - bd->Time) : (float)(1.0f / 60.0f);
bd->Time = current_time;
// Allegro 5 doesn't receive PrintScreen under Windows
#ifdef _WIN32
io.AddKeyEvent(ImGuiKey_PrintScreen, (::GetAsyncKeyState(VK_SNAPSHOT) & 0x8000) != 0);
#endif
|
// Setup mouse cursor shape
|
ImGui_ImplAllegro5_UpdateMouseCursor();
}
//-----------------------------------------------------------------------------
#endif // #ifndef IMGUI_DISABLE
|
random
|
<fim_prefix> model.hparams.n_ctx = params.n_ctx;
model.hparams.n_embd = config.dim; //params.n_embd;
model.hparams.n_ff = config.hidden_dim;
model.hparams.n_mult = 32;//params.n_mult;
model.hparams.n_head = config.n_heads; //params.n_head;
model.hparams.n_head_kv = config.n_kv_heads;
model.hparams.n_layer = config.n_layers; //params.n_layer;
model.hparams.n_rot = std::min((uint32_t)params.n_rotmax, model.hparams.n_embd / model.hparams.n_head);
print_params(&model.hparams);
struct ggml_init_params lcparams;
lcparams.mem_size = 1024ll*1024ll*1024ll*((size_t) params.mem_model_gb);
lcparams.mem_buffer = NULL;<fim_suffix>
model.ctx = ggml_init(lcparams);
init_model(&model);
model.name = basename(params.fn_llama2c_model);
save_as_llama_model(&vocab, &model, &weights, params.fn_llama2c_output_model);
LOG_INF("%s: Saving llama.c model file %s in ggml format at %s\n", __func__, params.fn_llama2c_model, params.fn_llama2c_output_model);
ggml_free(model.ctx);
return 0;
}
<fim_middle> lcparams.no_alloc = false;<fim_end>
|
model.hparams.n_ctx = params.n_ctx;
model.hparams.n_embd = config.dim; //params.n_embd;
model.hparams.n_ff = config.hidden_dim;
model.hparams.n_mult = 32;//params.n_mult;
model.hparams.n_head = config.n_heads; //params.n_head;
model.hparams.n_head_kv = config.n_kv_heads;
model.hparams.n_layer = config.n_layers; //params.n_layer;
model.hparams.n_rot = std::min((uint32_t)params.n_rotmax, model.hparams.n_embd / model.hparams.n_head);
print_params(&model.hparams);
struct ggml_init_params lcparams;
lcparams.mem_size = 1024ll*1024ll*1024ll*((size_t) params.mem_model_gb);
lcparams.mem_buffer = NULL;
|
lcparams.no_alloc = false;
|
model.ctx = ggml_init(lcparams);
init_model(&model);
model.name = basename(params.fn_llama2c_model);
save_as_llama_model(&vocab, &model, &weights, params.fn_llama2c_output_model);
LOG_INF("%s: Saving llama.c model file %s in ggml format at %s\n", __func__, params.fn_llama2c_model, params.fn_llama2c_output_model);
ggml_free(model.ctx);
return 0;
}
|
random
|
<fim_prefix> "\"pp\": %d, \"tg\": %d, \"pl\": %d, \"n_kv\": %d, \"t_pp\": %f, \"speed_pp\": %f, \"t_tg\": %f, \"speed_tg\": %f, \"t\": %f, \"speed\": %f}\n",
n_kv_max, params.n_batch, params.n_ubatch, int(params.flash_attn_type), params.is_pp_shared, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch,
pp, tg, pl, n_kv, t_pp, speed_pp, t_tg, speed_tg, t, speed
);
} else {
LOG("|%6d | %6d | %4d | %6d | %8.3f | %8.2f | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, pl, n_kv, t_pp, speed_pp, t_tg, speed_tg, t, speed);
}
}
}
}
<fim_suffix>
llama_batch_free(batch);
llama_free(ctx);
llama_model_free(model);
llama_backend_free();
LOG("\n\n");
return 0;
}
<fim_middle> LOG("\n");
llama_perf_context_print(ctx);<fim_end>
|
"\"pp\": %d, \"tg\": %d, \"pl\": %d, \"n_kv\": %d, \"t_pp\": %f, \"speed_pp\": %f, \"t_tg\": %f, \"speed_tg\": %f, \"t\": %f, \"speed\": %f}\n",
n_kv_max, params.n_batch, params.n_ubatch, int(params.flash_attn_type), params.is_pp_shared, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch,
pp, tg, pl, n_kv, t_pp, speed_pp, t_tg, speed_tg, t, speed
);
} else {
LOG("|%6d | %6d | %4d | %6d | %8.3f | %8.2f | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, pl, n_kv, t_pp, speed_pp, t_tg, speed_tg, t, speed);
}
}
}
}
|
LOG("\n");
llama_perf_context_print(ctx);
|
llama_batch_free(batch);
llama_free(ctx);
llama_model_free(model);
llama_backend_free();
LOG("\n\n");
return 0;
}
|
random
|
<fim_prefix>:WriteSettings(m_banlist_json, {{JSON_KEY, BanMapToJson(banSet)}}, errors)) {
return true;
}
for (const auto& err : errors) {
LogError("%s\n", err);
}
return false;
}
bool CBanDB::Read(banmap_t& banSet)
{
if (fs::exists(m_banlist_dat)) {
LogWarning("banlist.dat ignored because it can only be read by " CLIENT_NAME " version 22.x. Remove %s to silence this warning.", fs::quoted(fs::PathToString(m_banlist_dat)));
}
// If the JSON banlist does not exist, then recreate it
if (!fs::exists(m_banlist_json)) {
return false;
}
std::map<std::string, common::SettingsValue> settings;
std::vector<std::string> errors;
if (!<fim_suffix>) {
for (const auto& err : errors) {
LogWarning("Cannot load banlist %s: %s", fs::PathToString(m_banlist_json), err);
}
return false;
}
try {
BanMapFromJson(settings[JSON_KEY], banSet);
} catch (const std::runtime_error& e) {
LogWarning("Cannot parse banlist %s: %s", fs::PathToString(m_banlist_json), e.what());
return false;
}
return true;
}
bool DumpPeerAddresses(const ArgsManager& args, const AddrMan& addr)
{
const auto pathAddr = args.GetDataDirNet() / "peers.dat";
return SerializeFileDB("peers", pathAddr, addr);
}
void ReadFromStream(AddrMan& addr, DataStream& ssPeers)
{
DeserializeDB(ssPeers,<fim_middle>common::ReadSettings(m_banlist_json, settings, errors)<fim_end>
|
:WriteSettings(m_banlist_json, {{JSON_KEY, BanMapToJson(banSet)}}, errors)) {
return true;
}
for (const auto& err : errors) {
LogError("%s\n", err);
}
return false;
}
bool CBanDB::Read(banmap_t& banSet)
{
if (fs::exists(m_banlist_dat)) {
LogWarning("banlist.dat ignored because it can only be read by " CLIENT_NAME " version 22.x. Remove %s to silence this warning.", fs::quoted(fs::PathToString(m_banlist_dat)));
}
// If the JSON banlist does not exist, then recreate it
if (!fs::exists(m_banlist_json)) {
return false;
}
std::map<std::string, common::SettingsValue> settings;
std::vector<std::string> errors;
if (!
|
common::ReadSettings(m_banlist_json, settings, errors)
|
) {
for (const auto& err : errors) {
LogWarning("Cannot load banlist %s: %s", fs::PathToString(m_banlist_json), err);
}
return false;
}
try {
BanMapFromJson(settings[JSON_KEY], banSet);
} catch (const std::runtime_error& e) {
LogWarning("Cannot parse banlist %s: %s", fs::PathToString(m_banlist_json), e.what());
return false;
}
return true;
}
bool DumpPeerAddresses(const ArgsManager& args, const AddrMan& addr)
{
const auto pathAddr = args.GetDataDirNet() / "peers.dat";
return SerializeFileDB("peers", pathAddr, addr);
}
void ReadFromStream(AddrMan& addr, DataStream& ssPeers)
{
DeserializeDB(ssPeers,
|
ast_based
|
<fim_prefix>#pragma once
#include <hex.hpp>
#include <functional>
#include <list>
#include <memory>
#include <span>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <imgui.h>
#include <hex/ui/imgui_imhex_extensions.h>
#include <hex/api/localization_manager.hpp>
#include <hex/helpers/auto_reset.hpp>
EXPORT_MODULE namespace hex {
class AchievementManager;
class Achievement {
public:
explicit Achievement(UnlocalizedString unlocalizedCategory, UnlocalizedString unlocalizedName) : m_unlocalizedCategory(std::move(unlocalizedCategory)), m_unlocalizedName(std::move(unlocalizedName)) { }<fim_suffix> [[nodiscard]] const UnlocalizedString &getUnlocalizedName() const {
return m_unlocalizedName;
}
/**
* @brief Returns the unlocalized category of the achievement
* @return Unlocalized category of the achievement
*/
[[nodiscard]] const UnlocalizedString &getUnlocalizedCategory() const {
return m_unlocalizedCategory;
}
/**
* @brief Returns whether the achievement is unlocked
* @return Whether the achievement is unlocked
*/
[[nodiscard]] bool isUnlocked() const {
return m_progress == m_maxProgress;
}
/**<fim_middle>
/**
* @brief Returns the unlocalized name of the achievement
* @return Unlocalized name of the achievement
*/<fim_end>
|
#pragma once
#include <hex.hpp>
#include <functional>
#include <list>
#include <memory>
#include <span>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <imgui.h>
#include <hex/ui/imgui_imhex_extensions.h>
#include <hex/api/localization_manager.hpp>
#include <hex/helpers/auto_reset.hpp>
EXPORT_MODULE namespace hex {
class AchievementManager;
class Achievement {
public:
explicit Achievement(UnlocalizedString unlocalizedCategory, UnlocalizedString unlocalizedName) : m_unlocalizedCategory(std::move(unlocalizedCategory)), m_unlocalizedName(std::move(unlocalizedName)) { }
|
/**
* @brief Returns the unlocalized name of the achievement
* @return Unlocalized name of the achievement
*/
|
[[nodiscard]] const UnlocalizedString &getUnlocalizedName() const {
return m_unlocalizedName;
}
/**
* @brief Returns the unlocalized category of the achievement
* @return Unlocalized category of the achievement
*/
[[nodiscard]] const UnlocalizedString &getUnlocalizedCategory() const {
return m_unlocalizedCategory;
}
/**
* @brief Returns whether the achievement is unlocked
* @return Whether the achievement is unlocked
*/
[[nodiscard]] bool isUnlocked() const {
return m_progress == m_maxProgress;
}
/**
|
random
|
<fim_prefix> ERR_FAIL_NULL(ae);
AccessibilityElement *start_ae = rid_owner.get_or_null(p_text_start_id);
ERR_FAIL_NULL(start_ae);
ERR_FAIL_COND(start_ae->window_id != ae->window_id);
AccessibilityElement *end_ae = rid_owner.get_or_null(p_text_end_id);
ERR_FAIL_NULL(end_ae);
ERR_FAIL_COND(end_ae->window_id != ae->window_id);
int start_pos = p_start_char;
int end_pos = p_end_char;
RID start_rid;
RID end_rid;
for (const RID &rid : start_ae->children) {
const AccessibilityElement *child_ae = rid_owner.get_or_null(rid);
if (child_ae && child_ae->role == ACCESSKIT_ROLE_TEXT_RUN) {
if (p_start_char >= child_ae->run.x && p_start_char <= child_ae->run.y) {
start_rid = rid;<fim_suffix> break;
}
}
}
for (const RID &rid : end_ae->children) {
const AccessibilityElement *child_ae = rid_owner.get_or_null(rid);
if (child_ae && child_ae->role == ACCESSKIT_ROLE_TEXT_RUN) {
if (p_end_char >= child_ae->run.x && p_end_char <= child_ae->run.y) {
end_rid = rid;
end_pos = p_end_char - child_ae->run.x;
break;
}
}
}
ERR_FAIL_COND(start_rid.is_null() && end_rid.is_null());
_ensure_node(p_id, ae);
accesskit_text_selection sel;
sel.anchor.node = (accesskit_node_id)start_rid.get_id();
sel.anchor.character_index = start_pos;
sel.focus.node = (accesskit_node_id)end_rid.get_id();
sel.focus.character_index = end_pos;<fim_middle> start_pos = p_start_char - child_ae->run.x;<fim_end>
|
ERR_FAIL_NULL(ae);
AccessibilityElement *start_ae = rid_owner.get_or_null(p_text_start_id);
ERR_FAIL_NULL(start_ae);
ERR_FAIL_COND(start_ae->window_id != ae->window_id);
AccessibilityElement *end_ae = rid_owner.get_or_null(p_text_end_id);
ERR_FAIL_NULL(end_ae);
ERR_FAIL_COND(end_ae->window_id != ae->window_id);
int start_pos = p_start_char;
int end_pos = p_end_char;
RID start_rid;
RID end_rid;
for (const RID &rid : start_ae->children) {
const AccessibilityElement *child_ae = rid_owner.get_or_null(rid);
if (child_ae && child_ae->role == ACCESSKIT_ROLE_TEXT_RUN) {
if (p_start_char >= child_ae->run.x && p_start_char <= child_ae->run.y) {
start_rid = rid;
|
start_pos = p_start_char - child_ae->run.x;
|
break;
}
}
}
for (const RID &rid : end_ae->children) {
const AccessibilityElement *child_ae = rid_owner.get_or_null(rid);
if (child_ae && child_ae->role == ACCESSKIT_ROLE_TEXT_RUN) {
if (p_end_char >= child_ae->run.x && p_end_char <= child_ae->run.y) {
end_rid = rid;
end_pos = p_end_char - child_ae->run.x;
break;
}
}
}
ERR_FAIL_COND(start_rid.is_null() && end_rid.is_null());
_ensure_node(p_id, ae);
accesskit_text_selection sel;
sel.anchor.node = (accesskit_node_id)start_rid.get_id();
sel.anchor.character_index = start_pos;
sel.focus.node = (accesskit_node_id)end_rid.get_id();
sel.focus.character_index = end_pos;
|
random
|
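To make the arithmetic in the selection sample above concrete: if a TEXT_RUN child spans characters run.x = 10 through run.y = 20 and the requested selection starts at character 14 of the parent text, that child becomes the anchor node and its character_index is 14 - 10 = 4; the focus end is resolved the same way against the end element's runs. The numbers here are illustrative, not taken from the source.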
<fim_prefix>
if (!p_description.is_empty()) {
accesskit_node_set_role_description(ae->node, p_description.utf8().ptr());
} else {
accesskit_node_clear_role_description(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_state_description(const RID &p_id, const String &p_description) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_description.is_empty()) {
accesskit_node_set_state_description(ae->node, p_description.utf8().ptr());
} else {<fim_suffix>
void AccessibilityDriverAccessKit::accessibility_update_set_color_value(const RID &p_id, const Color &p_color) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_color_value(ae->node, p_color.to_rgba32());
}
void AccessibilityDriverAccessKit::accessibility_update_set_background_color(const RID &p_id, const Color &p_color) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
<fim_middle> accesskit_node_clear_state_description(ae->node);
}
}<fim_end>
|
if (!p_description.is_empty()) {
accesskit_node_set_role_description(ae->node, p_description.utf8().ptr());
} else {
accesskit_node_clear_role_description(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_state_description(const RID &p_id, const String &p_description) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_description.is_empty()) {
accesskit_node_set_state_description(ae->node, p_description.utf8().ptr());
} else {
|
accesskit_node_clear_state_description(ae->node);
}
}
|
void AccessibilityDriverAccessKit::accessibility_update_set_color_value(const RID &p_id, const Color &p_color) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_color_value(ae->node, p_color.to_rgba32());
}
void AccessibilityDriverAccessKit::accessibility_update_set_background_color(const RID &p_id, const Color &p_color) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
|
random
|
<fim_prefix> ctx->border = CAROTENE_NS::BORDER_MODE_CONSTANT;
break;
case CV_HAL_BORDER_REPLICATE:
ctx->border = CAROTENE_NS::BORDER_MODE_REPLICATE;
break;
case CV_HAL_BORDER_REFLECT:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT;
break;
case CV_HAL_BORDER_WRAP:
ctx->border = CAROTENE_NS::BORDER_MODE_WRAP;
break;
case CV_HAL_BORDER_REFLECT_101:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT101;
break;
default:
delete ctx;
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
if(!CAROTENE_NS::isSeparableFilter3x3Supported(CAROTENE_NS::Size2D(16, 16), ctx->border, 3, 3))<fim_suffix> }
switch(kernel_type)
{
case CV_8UC1:
ctx->kernelx_data[0]=kernelx_data[0];
ctx->kernelx_data[1]=kernelx_data[1];
ctx->kernelx_data[2]=kernelx_data[2];
ctx->kernely_data[0]=kernely_data[0];
ctx->kernely_data[1]=kernely_data[1];
ctx->kernely_data[2]=kernely_data[2];
break;
case CV_8SC1:
ctx->kernelx_data[0]=((char*)kernelx_data)[0];
ctx->kernelx_data[1]=((char*)kernelx_data)[1];
ctx->kernelx_data[2]=((char*)kernelx_data)[2];
ctx->kernely_data[0]=((char*)kernely_data)[0];
ctx->kernely_data[1]=((char*)kernely_data)[1];
ctx->kernely_data[2]=((char*)kernely_data)[2];<fim_middle> {
delete ctx;
return CV_HAL_ERROR_NOT_IMPLEMENTED;<fim_end>
|
ctx->border = CAROTENE_NS::BORDER_MODE_CONSTANT;
break;
case CV_HAL_BORDER_REPLICATE:
ctx->border = CAROTENE_NS::BORDER_MODE_REPLICATE;
break;
case CV_HAL_BORDER_REFLECT:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT;
break;
case CV_HAL_BORDER_WRAP:
ctx->border = CAROTENE_NS::BORDER_MODE_WRAP;
break;
case CV_HAL_BORDER_REFLECT_101:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT101;
break;
default:
delete ctx;
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
if(!CAROTENE_NS::isSeparableFilter3x3Supported(CAROTENE_NS::Size2D(16, 16), ctx->border, 3, 3))
|
{
delete ctx;
return CV_HAL_ERROR_NOT_IMPLEMENTED;
|
}
switch(kernel_type)
{
case CV_8UC1:
ctx->kernelx_data[0]=kernelx_data[0];
ctx->kernelx_data[1]=kernelx_data[1];
ctx->kernelx_data[2]=kernelx_data[2];
ctx->kernely_data[0]=kernely_data[0];
ctx->kernely_data[1]=kernely_data[1];
ctx->kernely_data[2]=kernely_data[2];
break;
case CV_8SC1:
ctx->kernelx_data[0]=((char*)kernelx_data)[0];
ctx->kernelx_data[1]=((char*)kernelx_data)[1];
ctx->kernelx_data[2]=((char*)kernelx_data)[2];
ctx->kernely_data[0]=((char*)kernely_data)[0];
ctx->kernely_data[1]=((char*)kernely_data)[1];
ctx->kernely_data[2]=((char*)kernely_data)[2];
|
random
|
<fim_prefix>cnt = 0;
if (input_file_.empty()) {
SetInputName(nullptr);
}
std::stringstream alto_str;
// Use "C" locale (needed for int values larger than 999).
alto_str.imbue(std::locale::classic());
alto_str << "\t\t<Page WIDTH=\"" << rect_width_ << "\" HEIGHT=\"" << rect_height_
<< "\" PHYSICAL_IMG_NR=\"" << page_number << "\""
<< " ID=\"page_" << page_number << "\">\n"
<< "\t\t\t<PrintSpace HPOS=\"0\" VPOS=\"0\""
<< " WIDTH=\"" << rect_width_ << "\""
<< " HEIGHT=\"" << rect_height_ << "\">\n";
std::unique_ptr<ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
<fim_suffix>;
continue;
}
int left, top, right, bottom;
auto block_type = res_it->BlockType();
switch (block_type) {
case PT_FLOWING_IMAGE:
case PT_HEADING_IMAGE:
case PT_PULLOUT_IMAGE: {
// Handle all kinds of images.
// TODO: optionally add TYPE, for example TYPE="photo".
alto_str << "\t\t\t\t<Illustration ID=\"" << GetID("cblock", page_number, bcnt++) << "\"";
AddBoxToAlto(res_it.get(), RIL_BLOCK, alto_str);
alto_str << "</Illustration>\n";
res_it->Next(RIL_BLOCK);
continue;
}
case PT_HORZ_LINE:
case PT_VERT_LINE:
// Handle horizontal and vertical lines.
alto_str << "\t\<fim_middle>res_it->Next(RIL_WORD)<fim_end>
|
cnt = 0;
if (input_file_.empty()) {
SetInputName(nullptr);
}
std::stringstream alto_str;
// Use "C" locale (needed for int values larger than 999).
alto_str.imbue(std::locale::classic());
alto_str << "\t\t<Page WIDTH=\"" << rect_width_ << "\" HEIGHT=\"" << rect_height_
<< "\" PHYSICAL_IMG_NR=\"" << page_number << "\""
<< " ID=\"page_" << page_number << "\">\n"
<< "\t\t\t<PrintSpace HPOS=\"0\" VPOS=\"0\""
<< " WIDTH=\"" << rect_width_ << "\""
<< " HEIGHT=\"" << rect_height_ << "\">\n";
std::unique_ptr<ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
|
res_it->Next(RIL_WORD)
|
;
continue;
}
int left, top, right, bottom;
auto block_type = res_it->BlockType();
switch (block_type) {
case PT_FLOWING_IMAGE:
case PT_HEADING_IMAGE:
case PT_PULLOUT_IMAGE: {
// Handle all kinds of images.
// TODO: optionally add TYPE, for example TYPE="photo".
alto_str << "\t\t\t\t<Illustration ID=\"" << GetID("cblock", page_number, bcnt++) << "\"";
AddBoxToAlto(res_it.get(), RIL_BLOCK, alto_str);
alto_str << "</Illustration>\n";
res_it->Next(RIL_BLOCK);
continue;
}
case PT_HORZ_LINE:
case PT_VERT_LINE:
// Handle horizontal and vertical lines.
alto_str << "\t\
|
ast_based
|
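The ALTO writer above imbues its output stream with the classic "C" locale before printing coordinates. A small self-contained sketch of why that matters for values above 999; only standard-library calls are used and the numbers are illustrative.

#include <iostream>
#include <locale>
#include <sstream>

int main() {
    std::stringstream alto_like;
    // A user-facing locale may group digits (e.g. "1,234"), which would corrupt
    // numeric XML attributes; the classic "C" locale guarantees plain digits.
    alto_like.imbue(std::locale::classic());
    alto_like << "WIDTH=\"" << 12345 << "\"";
    std::cout << alto_like.str() << '\n'; // prints WIDTH="12345"
    return 0;
}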
<fim_prefix>ficient to provide a Pix directly.
*/
void TessBaseAPI::SetImage(Pix *pix) {
if (InternalSetImage()) {
if (pixGetSpp(pix) == 4 && pixGetInputFormat(pix) == IFF_PNG) {
// remove alpha channel from png
Pix *p1 = pixRemoveAlpha(pix);
pixSetSpp(p1, 3);
(void)pixCopy(pix, p1);
pixDestroy(&p1);
}
thresholder_->SetImage(pix);
SetInputImage(thresholder_->GetPixRect());
}
}
/**
* Restrict recognition to a sub-rectangle of the image. Call after SetImage.
* Each SetRectangle clears the recognition results so multiple rectangles
* can be recognized with the same image.
*/
void TessBaseAPI::SetRectangle(int left, int top, int width, int height) {
<fim_suffix>
thresholder_->SetRectangle(left, top, width, height);
ClearResults();
}
/**
* ONLY available after SetImage if you have Leptonica installed.
* Get a copy of the internal thresholded image from Tesseract.
*/
Pix *TessBaseAPI::GetThresholdedImage() {
if (tesseract_ == nullptr || thresholder_ == nullptr) {
return nullptr;
}
if (tesseract_->pix_binary() == nullptr && !Threshold(&tesseract_->mutable_pix_binary()->pix_)) {
return nullptr;
}
return tesseract_->pix_binary().clone();
}
/**
* Get the result of page layout analysis as a leptonica-style
* Boxa, Pixa pair, in reading order.
* Can be called before or after Recognize.
*/
Boxa *TessBaseAPI::GetRegions(Pixa **<fim_middle>if (thresholder_ == nullptr) {
return;
}<fim_end>
|
ficient to provide a Pix directly.
*/
void TessBaseAPI::SetImage(Pix *pix) {
if (InternalSetImage()) {
if (pixGetSpp(pix) == 4 && pixGetInputFormat(pix) == IFF_PNG) {
// remove alpha channel from png
Pix *p1 = pixRemoveAlpha(pix);
pixSetSpp(p1, 3);
(void)pixCopy(pix, p1);
pixDestroy(&p1);
}
thresholder_->SetImage(pix);
SetInputImage(thresholder_->GetPixRect());
}
}
/**
* Restrict recognition to a sub-rectangle of the image. Call after SetImage.
* Each SetRectangle clears the recognition results so multiple rectangles
* can be recognized with the same image.
*/
void TessBaseAPI::SetRectangle(int left, int top, int width, int height) {
|
if (thresholder_ == nullptr) {
return;
}
|
thresholder_->SetRectangle(left, top, width, height);
ClearResults();
}
/**
* ONLY available after SetImage if you have Leptonica installed.
* Get a copy of the internal thresholded image from Tesseract.
*/
Pix *TessBaseAPI::GetThresholdedImage() {
if (tesseract_ == nullptr || thresholder_ == nullptr) {
return nullptr;
}
if (tesseract_->pix_binary() == nullptr && !Threshold(&tesseract_->mutable_pix_binary()->pix_)) {
return nullptr;
}
return tesseract_->pix_binary().clone();
}
/**
* Get the result of page layout analysis as a leptonica-style
* Boxa, Pixa pair, in reading order.
* Can be called before or after Recognize.
*/
Boxa *TessBaseAPI::GetRegions(Pixa **
|
ast_based
|
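The doc comment above says SetRectangle must be called after SetImage and that each call clears previous results, so several regions of one image can be recognized in turn. A minimal usage sketch under those rules; the language code, image file and rectangle coordinates are placeholders, not values from the source.

#include <leptonica/allheaders.h>
#include <tesseract/baseapi.h>

int main() {
    tesseract::TessBaseAPI api;
    if (api.Init(nullptr, "eng") != 0) return 1;   // assumes English traineddata is installed
    Pix *image = pixRead("page.png");              // placeholder input image
    if (image == nullptr) return 1;
    api.SetImage(image);
    // Each SetRectangle clears earlier recognition results, so the same image
    // can be re-recognized region by region.
    api.SetRectangle(0, 0, 200, 100);
    char *text = api.GetUTF8Text();
    // ... use text ...
    delete[] text;
    api.End();
    pixDestroy(&image);
    return 0;
}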
<fim_prefix>paration, text_buf.get_size().y * 0.8), track_color);
subtrack_colors[current_track] = track_color;
subtracks[current_track] = rect;
} else {
draw_rect(rect, track_focus_color);
if (locked_tracks.has(selected_track)) {
selected_track_color.set_hsv(h, 0.0, 0.4);
} else {
selected_track_color.set_hsv(h, 0.8, 0.8);
}
}
Vector2 string_pos = Point2(margin + h_separation, vofs);
text_buf.draw(get_canvas_item(), string_pos, cc);
float icon_start_height = vofs + rect.size.y / 2.0;
Rect2 remove_rect = Rect2(remove_hpos, icon_start_height - remove->get_height() / 2.0, remove->get_width(), remove->get_height());
if <fim_suffix> else {
draw_texture(remove, remove_rect.position);
}
Rect2 lock_rect = Rect2(lock_hpos, icon_start_height - lock->get_height() / 2.0, lock->get_width(), lock->get_height());
if (locked_tracks.has(current_track)) {
draw_texture(lock, lock_rect.position);
} else {
draw_texture(unlock, lock_rect.position);
}
Rect2 visible_rect = Rect2(visibility_hpos, icon_start_height - visibility_visible->get_height() / 2.0, visibility_visible->get_width(), visibility_visible->get_height());
if (hidden_tracks.has(current_track)) {
draw_texture(visibility_hidden, visible_rect.position);
} else {
draw_texture(visibility_visible, visible_re<fim_middle>(read_only) {
draw_texture(remove, remove_rect.position, dc);
}<fim_end>
|
paration, text_buf.get_size().y * 0.8), track_color);
subtrack_colors[current_track] = track_color;
subtracks[current_track] = rect;
} else {
draw_rect(rect, track_focus_color);
if (locked_tracks.has(selected_track)) {
selected_track_color.set_hsv(h, 0.0, 0.4);
} else {
selected_track_color.set_hsv(h, 0.8, 0.8);
}
}
Vector2 string_pos = Point2(margin + h_separation, vofs);
text_buf.draw(get_canvas_item(), string_pos, cc);
float icon_start_height = vofs + rect.size.y / 2.0;
Rect2 remove_rect = Rect2(remove_hpos, icon_start_height - remove->get_height() / 2.0, remove->get_width(), remove->get_height());
if
|
(read_only) {
draw_texture(remove, remove_rect.position, dc);
}
|
else {
draw_texture(remove, remove_rect.position);
}
Rect2 lock_rect = Rect2(lock_hpos, icon_start_height - lock->get_height() / 2.0, lock->get_width(), lock->get_height());
if (locked_tracks.has(current_track)) {
draw_texture(lock, lock_rect.position);
} else {
draw_texture(unlock, lock_rect.position);
}
Rect2 visible_rect = Rect2(visibility_hpos, icon_start_height - visibility_visible->get_height() / 2.0, visibility_visible->get_width(), visibility_visible->get_height());
if (hidden_tracks.has(current_track)) {
draw_texture(visibility_hidden, visible_rect.position);
} else {
draw_texture(visibility_visible, visible_re
|
ast_based
|
<fim_prefix>nomial(const uchar* src_data, size_t src_step, uchar* dst_data, size_t dst_step,
int width, int height, int depth, int cn, size_t margin_left, size_t margin_top,
size_t margin_right, size_t margin_bottom, size_t ksize, int border_type)
{
CAROTENE_NS::Size2D sz(width, height);
CAROTENE_NS::BORDER_MODE border = borderCV2Carotene(border_type);
CAROTENE_NS::Margin mg(margin_left, margin_right, margin_top, margin_bottom);
if (ksize == 3)
{
if ((depth != CV_8U) || (cn != 1))
return CV_HAL_ERROR_NOT_IMPLEMENTED;
if (CAROTENE_NS::isGaussianBlur3x3MarginSupported(sz, border, mg))
{
<fim_suffix>;
return CV_HAL_ERROR_OK;
}
}
else if (ksize == 5)
{
if (!CAROTENE_NS::isGaussianBlur5x5Supported(sz, cn, border))
return CV_HAL_ERROR_NOT_IMPLEMENTED;
if (depth == CV_8U)
{
CAROTENE_NS::gaussianBlur5x5(sz, cn, (uint8_t*)src_data, src_step,
(uint8_t*)dst_data, dst_step, border, 0, mg);
return CV_HAL_ERROR_OK;
}
else if (depth == CV_16U)
{
CAROTENE_NS::gaussianBlur5x5(sz, cn, (uint16_t*)src_data, src_step,
(uint16_t*)dst_data, dst_step, border, 0, mg);
return CV_HAL_ERROR_O<fim_middle>CAROTENE_NS::gaussianBlur3x3Margin(sz, src_data, src_step, dst_data, dst_step,
border, 0, mg)<fim_end>
|
nomial(const uchar* src_data, size_t src_step, uchar* dst_data, size_t dst_step,
int width, int height, int depth, int cn, size_t margin_left, size_t margin_top,
size_t margin_right, size_t margin_bottom, size_t ksize, int border_type)
{
CAROTENE_NS::Size2D sz(width, height);
CAROTENE_NS::BORDER_MODE border = borderCV2Carotene(border_type);
CAROTENE_NS::Margin mg(margin_left, margin_right, margin_top, margin_bottom);
if (ksize == 3)
{
if ((depth != CV_8U) || (cn != 1))
return CV_HAL_ERROR_NOT_IMPLEMENTED;
if (CAROTENE_NS::isGaussianBlur3x3MarginSupported(sz, border, mg))
{
|
CAROTENE_NS::gaussianBlur3x3Margin(sz, src_data, src_step, dst_data, dst_step,
border, 0, mg)
|
;
return CV_HAL_ERROR_OK;
}
}
else if (ksize == 5)
{
if (!CAROTENE_NS::isGaussianBlur5x5Supported(sz, cn, border))
return CV_HAL_ERROR_NOT_IMPLEMENTED;
if (depth == CV_8U)
{
CAROTENE_NS::gaussianBlur5x5(sz, cn, (uint8_t*)src_data, src_step,
(uint8_t*)dst_data, dst_step, border, 0, mg);
return CV_HAL_ERROR_OK;
}
else if (depth == CV_16U)
{
CAROTENE_NS::gaussianBlur5x5(sz, cn, (uint16_t*)src_data, src_step,
(uint16_t*)dst_data, dst_step, border, 0, mg);
return CV_HAL_ERROR_O
|
ast_based
|
<fim_prefix>ctRatio;
string outputFilename;
string inputFilename = "";
vector<vector<Point2f> > imgpt[3];
vector<string> imageList;
cv::CommandLineParser parser(argc, argv,
"{help ||}{w||}{h||}{s|1|}{o|out_camera_data.yml|}"
"{zt||}{a|1|}{p||}{@input||}");
if (parser.has("help"))
{
help(argv);
return 0;
}
boardSize.width = parser.get<int>("w");
boardSize.height = parser.get<int>("h");
squareSize = parser.get<float>("s");
aspectRatio = parser.get<float>("a");
if (parser.has("a"))
flags |= CALIB_FIX_ASPECT_RATIO;
if (parser.has("zt"))
flags |= CALIB_ZERO_TANGENT_DIST;
if (parser.has("p"))
<fim_suffix>;
outputFilename = parser.get<string>("o");
inputFilename = parser.get<string>("@input");
if (!parser.check())
{
help(argv);
parser.printErrors();
return -1;
}
if (boardSize.width <= 0)
return fprintf( stderr, "Invalid board width\n" ), -1;
if (boardSize.height <= 0)
return fprintf( stderr, "Invalid board height\n" ), -1;
if (squareSize <= 0)
return fprintf( stderr, "Invalid board square width\n" ), -1;
if (aspectRatio <= 0)
return printf("Invalid aspect ratio\n" ), -1;
if( inputFilename.empty() ||
!readStringList(inputFilename, imageList) ||
imageList.size() == 0 || imageList.size()<fim_middle>flags |= CALIB_FIX_PRINCIPAL_POINT<fim_end>
|
ctRatio;
string outputFilename;
string inputFilename = "";
vector<vector<Point2f> > imgpt[3];
vector<string> imageList;
cv::CommandLineParser parser(argc, argv,
"{help ||}{w||}{h||}{s|1|}{o|out_camera_data.yml|}"
"{zt||}{a|1|}{p||}{@input||}");
if (parser.has("help"))
{
help(argv);
return 0;
}
boardSize.width = parser.get<int>("w");
boardSize.height = parser.get<int>("h");
squareSize = parser.get<float>("s");
aspectRatio = parser.get<float>("a");
if (parser.has("a"))
flags |= CALIB_FIX_ASPECT_RATIO;
if (parser.has("zt"))
flags |= CALIB_ZERO_TANGENT_DIST;
if (parser.has("p"))
|
flags |= CALIB_FIX_PRINCIPAL_POINT
|
;
outputFilename = parser.get<string>("o");
inputFilename = parser.get<string>("@input");
if (!parser.check())
{
help(argv);
parser.printErrors();
return -1;
}
if (boardSize.width <= 0)
return fprintf( stderr, "Invalid board width\n" ), -1;
if (boardSize.height <= 0)
return fprintf( stderr, "Invalid board height\n" ), -1;
if (squareSize <= 0)
return fprintf( stderr, "Invalid board square width\n" ), -1;
if (aspectRatio <= 0)
return printf("Invalid aspect ratio\n" ), -1;
if( inputFilename.empty() ||
!readStringList(inputFilename, imageList) ||
imageList.size() == 0 || imageList.size()
|
ast_based
|
<fim_prefix>
bool TessBaseAPI::ProcessPage(Pix *pix, int page_index, const char *filename,
const char *retry_config, int timeout_millisec,
TessResultRenderer *renderer) {
SetInputName(filename);
SetImage(pix);
bool failed = false;
if (tesseract_->tessedit_pageseg_mode == PSM_AUTO_ONLY) {
// Disabled character recognition
if (! std::unique_ptr<const PageIterator>(AnalyseLayout())) {
failed = true;
}
} else if (tesseract_->tessedit_pageseg_mode == PSM_OSD_ONLY) {
failed = FindLines() != 0;
} else if (timeout_millisec > 0) {
// Running with a timeout.
ETEXT_DESC monitor;
monitor.cancel = nullptr;
<fim_suffix>;
monitor.set_deadline_msecs(timeout_millisec);
// Now run the main recognition.
failed = Recognize(&monitor) < 0;
} else {
// Normal layout and character recognition with no timeout.
failed = Recognize(nullptr) < 0;
}
if (tesseract_->tessedit_write_images) {
Pix *page_pix = GetThresholdedImage();
std::string output_filename = output_file_ + ".processed";
if (page_index > 0) {
output_filename += std::to_string(page_index);
}
output_filename += ".tif";
pixWrite(output_filename.c_str(), page_pix, IFF_TIFF_G4);
pixDestroy(&page_pix);
}
if (failed && retry_config != nullptr && retry_config[0] != '\0') {
// Save current config<fim_middle>monitor.cancel_this = nullptr<fim_end>
|
bool TessBaseAPI::ProcessPage(Pix *pix, int page_index, const char *filename,
const char *retry_config, int timeout_millisec,
TessResultRenderer *renderer) {
SetInputName(filename);
SetImage(pix);
bool failed = false;
if (tesseract_->tessedit_pageseg_mode == PSM_AUTO_ONLY) {
// Disabled character recognition
if (! std::unique_ptr<const PageIterator>(AnalyseLayout())) {
failed = true;
}
} else if (tesseract_->tessedit_pageseg_mode == PSM_OSD_ONLY) {
failed = FindLines() != 0;
} else if (timeout_millisec > 0) {
// Running with a timeout.
ETEXT_DESC monitor;
monitor.cancel = nullptr;
|
monitor.cancel_this = nullptr
|
;
monitor.set_deadline_msecs(timeout_millisec);
// Now run the main recognition.
failed = Recognize(&monitor) < 0;
} else {
// Normal layout and character recognition with no timeout.
failed = Recognize(nullptr) < 0;
}
if (tesseract_->tessedit_write_images) {
Pix *page_pix = GetThresholdedImage();
std::string output_filename = output_file_ + ".processed";
if (page_index > 0) {
output_filename += std::to_string(page_index);
}
output_filename += ".tif";
pixWrite(output_filename.c_str(), page_pix, IFF_TIFF_G4);
pixDestroy(&page_pix);
}
if (failed && retry_config != nullptr && retry_config[0] != '\0') {
// Save current config
|
ast_based
|
<fim_prefix>splayServer::AccessibilityFlags::FLAG_VISITED: {
if (p_value) {
accesskit_node_set_visited(ae->node);
} else {
accesskit_node_clear_visited(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_BUSY: {
if (p_value) {
accesskit_node_set_busy(ae->node);
} else {
accesskit_node_clear_busy(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_MODAL: {
if (p_value) {
accesskit_node_set_modal(ae->node);
} else {
accesskit_node_clear_modal(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_TOUCH_PASSTHROUGH: {
if (p_value) {
accesskit_node_set_touch_transparent(ae->node);
} else {
<fim_suffix>
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_READONLY: {
if (p_value) {
accesskit_node_set_read_only(ae->node);
} else {
accesskit_node_clear_read_only(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_DISABLED: {
if (p_value) {
accesskit_node_set_disabled(ae->node);
} else {
accesskit_node_clear_disabled(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_CLIPS_CHILDREN: {
if (p_value) {
accesskit_node_set_clips_children(ae->node);
} else {
accesskit_node_clear_clips_children(ae->node);
}
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_classname(const RI<fim_middle>accesskit_node_clear_touch_transparent(ae->node);<fim_end>
|
splayServer::AccessibilityFlags::FLAG_VISITED: {
if (p_value) {
accesskit_node_set_visited(ae->node);
} else {
accesskit_node_clear_visited(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_BUSY: {
if (p_value) {
accesskit_node_set_busy(ae->node);
} else {
accesskit_node_clear_busy(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_MODAL: {
if (p_value) {
accesskit_node_set_modal(ae->node);
} else {
accesskit_node_clear_modal(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_TOUCH_PASSTHROUGH: {
if (p_value) {
accesskit_node_set_touch_transparent(ae->node);
} else {
|
accesskit_node_clear_touch_transparent(ae->node);
|
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_READONLY: {
if (p_value) {
accesskit_node_set_read_only(ae->node);
} else {
accesskit_node_clear_read_only(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_DISABLED: {
if (p_value) {
accesskit_node_set_disabled(ae->node);
} else {
accesskit_node_clear_disabled(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_CLIPS_CHILDREN: {
if (p_value) {
accesskit_node_set_clips_children(ae->node);
} else {
accesskit_node_clear_clips_children(ae->node);
}
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_classname(const RI
|
ast_based
|
<fim_prefix>
break;
}
}
ERR_CONTINUE(idx == -1);
if (touch[i].pos == p_points[idx].pos) {
continue; // Don't move unnecessarily.
}
Ref<InputEventScreenDrag> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_position(p_points[idx].pos);
ev->set_relative(p_points[idx].pos - touch[i].pos);
ev->set_relative_screen_position(ev->get_relative());
ev->set_pressure(p_points[idx].pressure);
ev->set_tilt(p_points[idx].tilt);
Input::get_singleton()->parse_input_event(ev);
touch.write[i].pos = p_points[idx].pos;
}
} break;
case AMOTION_EVENT_ACTION_CANCEL: {
_cancel_all_touch();
} break;
case AMOTION_EVENT_ACTION_UP: { <fim_suffix>
} break;
case AMOTION_EVENT_ACTION_POINTER_DOWN: { // add touch
for (int i = 0; i < p_points.size(); i++) {
if (p_points[i].id == p_pointer) {
TouchPos tp = p_points[i];
touch.push_back(tp);
Ref<InputEventScreenTouch> ev;
ev.instantiate();
ev->set_index(tp.id);
ev->set_pressed(true);
ev->set_position(tp.pos);
Input::get_singleton()->parse_input_event(ev);
break;
}
}
} break;
case AMOTION_EVENT_ACTION_POINTER_UP: { // remove touch
for (int i = 0; i < touch.size(); i++) {
if (touch[i].id == p_pointer) {
Ref<InputEventScreenTouch> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_pressed(fa<fim_middle>//release
_release_all_touch();<fim_end>
|
break;
}
}
ERR_CONTINUE(idx == -1);
if (touch[i].pos == p_points[idx].pos) {
continue; // Don't move unnecessarily.
}
Ref<InputEventScreenDrag> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_position(p_points[idx].pos);
ev->set_relative(p_points[idx].pos - touch[i].pos);
ev->set_relative_screen_position(ev->get_relative());
ev->set_pressure(p_points[idx].pressure);
ev->set_tilt(p_points[idx].tilt);
Input::get_singleton()->parse_input_event(ev);
touch.write[i].pos = p_points[idx].pos;
}
} break;
case AMOTION_EVENT_ACTION_CANCEL: {
_cancel_all_touch();
} break;
case AMOTION_EVENT_ACTION_UP: {
|
//release
_release_all_touch();
|
} break;
case AMOTION_EVENT_ACTION_POINTER_DOWN: { // add touch
for (int i = 0; i < p_points.size(); i++) {
if (p_points[i].id == p_pointer) {
TouchPos tp = p_points[i];
touch.push_back(tp);
Ref<InputEventScreenTouch> ev;
ev.instantiate();
ev->set_index(tp.id);
ev->set_pressed(true);
ev->set_position(tp.pos);
Input::get_singleton()->parse_input_event(ev);
break;
}
}
} break;
case AMOTION_EVENT_ACTION_POINTER_UP: { // remove touch
for (int i = 0; i < touch.size(); i++) {
if (touch[i].id == p_pointer) {
Ref<InputEventScreenTouch> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_pressed(fa
|
ast_based
|
<fim_prefix>_model * llama_model_load_from_splits(
const char ** paths,
size_t n_paths,
struct llama_model_params params);
LLAMA_API void llama_model_save_to_file(
const struct llama_model * model,
const char * path_model);
DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
"use llama_model_free instead");
LLAMA_API void llama_model_free(struct llama_model * model);
LLAMA_API struct llama_context * llama_init_from_model(
struct llama_model * model,
struct llama_context_params params);
DEPRECATED(<fim_suffix>,
"use llama_init_from_model instead");
// Frees all allocated memory
LLAMA_API void llama_free(struct llama_context * ctx);
LLAMA_API int64_t llama_time_us(void);
LLAMA_API size_t llama_max_devices(void);
LLAMA_API size_t llama_max_parallel_sequences(void);
LLAMA_API bool llama_supports_mmap (void);
LLAMA_API bool llama_supports_mlock (void);
LLAMA_API bool llama_supports_gpu_offload(void);
LLAMA_API bool llama_supports_rpc (void);
LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_ubatch <fim_middle>LLAMA_API struct llama_context * llama_new_context_with_model(
struct llama_model * model,
struct llama_context_params params)<fim_end>
|
_model * llama_model_load_from_splits(
const char ** paths,
size_t n_paths,
struct llama_model_params params);
LLAMA_API void llama_model_save_to_file(
const struct llama_model * model,
const char * path_model);
DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
"use llama_model_free instead");
LLAMA_API void llama_model_free(struct llama_model * model);
LLAMA_API struct llama_context * llama_init_from_model(
struct llama_model * model,
struct llama_context_params params);
DEPRECATED(
|
LLAMA_API struct llama_context * llama_new_context_with_model(
struct llama_model * model,
struct llama_context_params params)
|
,
"use llama_init_from_model instead");
// Frees all allocated memory
LLAMA_API void llama_free(struct llama_context * ctx);
LLAMA_API int64_t llama_time_us(void);
LLAMA_API size_t llama_max_devices(void);
LLAMA_API size_t llama_max_parallel_sequences(void);
LLAMA_API bool llama_supports_mmap (void);
LLAMA_API bool llama_supports_mlock (void);
LLAMA_API bool llama_supports_gpu_offload(void);
LLAMA_API bool llama_supports_rpc (void);
LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_ubatch
|
ast_based
|
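The header excerpt above deprecates llama_new_context_with_model in favour of llama_init_from_model. A minimal sketch of the non-deprecated call sequence; the model path is a placeholder, and the loader name llama_model_load_from_file plus default parameters are assumptions based on the same API family shown in the header.

#include "llama.h"

int main() {
    llama_backend_init();
    llama_model_params mparams = llama_model_default_params();
    llama_model *model = llama_model_load_from_file("model.gguf", mparams); // placeholder path
    if (model == nullptr) return 1;
    llama_context_params cparams = llama_context_default_params();
    llama_context *ctx = llama_init_from_model(model, cparams); // replaces llama_new_context_with_model
    // ... run inference ...
    llama_free(ctx);
    llama_model_free(model);
    llama_backend_free();
    return 0;
}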
<fim_prefix>KeyEvent(key, (ev->type == ALLEGRO_EVENT_KEY_DOWN));
io.SetKeyEventNativeData(key, ev->keyboard.keycode, -1); // To support legacy indexing (<1.87 user code)
}
return true;
case ALLEGRO_EVENT_DISPLAY_SWITCH_OUT:
if (ev->display.source == bd->Display)
io.AddFocusEvent(false);
return true;
case ALLEGRO_EVENT_DISPLAY_SWITCH_IN:
if (ev->display.source == bd->Display)
{
io.AddFocusEvent(true);
#if defined(ALLEGRO_UNSTABLE)
al_clear_keyboard_state(bd->Display);
#endif
}
return true;
}
return false;
}
static void ImGui_ImplAllegro5_UpdateMouseCursor()
{
ImGuiIO& io = <fim_suffix>;
if (io.ConfigFlags & ImGuiConfigFlags_NoMouseCursorChange)
return;
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
ImGuiMouseCursor imgui_cursor = ImGui::GetMouseCursor();
// Hide OS mouse cursor if imgui is drawing it
if (io.MouseDrawCursor)
imgui_cursor = ImGuiMouseCursor_None;
if (bd->LastCursor == imgui_cursor)
return;
bd->LastCursor = imgui_cursor;
if (imgui_cursor == ImGuiMouseCursor_None)
{
al_set_mouse_cursor(bd->Display, bd->MouseCursorInvisible);
}
else
{
ALLEGRO_SYSTEM_MOUSE_CURSOR cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_DEFAULT;
switch (imgui_cursor)
{
<fim_middle>ImGui::GetIO()<fim_end>
|
KeyEvent(key, (ev->type == ALLEGRO_EVENT_KEY_DOWN));
io.SetKeyEventNativeData(key, ev->keyboard.keycode, -1); // To support legacy indexing (<1.87 user code)
}
return true;
case ALLEGRO_EVENT_DISPLAY_SWITCH_OUT:
if (ev->display.source == bd->Display)
io.AddFocusEvent(false);
return true;
case ALLEGRO_EVENT_DISPLAY_SWITCH_IN:
if (ev->display.source == bd->Display)
{
io.AddFocusEvent(true);
#if defined(ALLEGRO_UNSTABLE)
al_clear_keyboard_state(bd->Display);
#endif
}
return true;
}
return false;
}
static void ImGui_ImplAllegro5_UpdateMouseCursor()
{
ImGuiIO& io =
|
ImGui::GetIO()
|
;
if (io.ConfigFlags & ImGuiConfigFlags_NoMouseCursorChange)
return;
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
ImGuiMouseCursor imgui_cursor = ImGui::GetMouseCursor();
// Hide OS mouse cursor if imgui is drawing it
if (io.MouseDrawCursor)
imgui_cursor = ImGuiMouseCursor_None;
if (bd->LastCursor == imgui_cursor)
return;
bd->LastCursor = imgui_cursor;
if (imgui_cursor == ImGuiMouseCursor_None)
{
al_set_mouse_cursor(bd->Display, bd->MouseCursorInvisible);
}
else
{
ALLEGRO_SYSTEM_MOUSE_CURSOR cursor_id = ALLEGRO_SYSTEM_MOUSE_CURSOR_DEFAULT;
switch (imgui_cursor)
{
|
ast_based
|
<fim_prefix>kRemoteFunction:
return "Remote Function";
case ActivityCategory::kMisc:
return "Miscellaneous";
case ActivityCategory::kDatasetOp:
return "Dataset Op";
case ActivityCategory::kTpuOp:
return "TPU Op";
case ActivityCategory::kRendezvous:
return "Rendezvous";
}
}
// An activity to be recorded.
struct Activity {
using Attributes = absl::flat_hash_map<tsl::string, tsl::string>;
// A human readable title of the activity.
tsl::string title;
// The category of the activity.
ActivityCategory category = ActivityCategory::kMisc;
// Key/value pairs that are attached to the activity.
Attributes attributes;
Activity() = default;
Activity(<fim_suffix>, ActivityCategory category)
: title(std::move(title)), category(category) {}
Activity(tsl::string title, ActivityCategory category, Attributes attributes)
: title(std::move(title)),
category(category),
attributes(std::move(attributes)) {}
};
// Enable activity watcher to send own workers' activities to coordination
// service and also fetch all workers' activities.
void MaybeEnableMultiWorkersWatching(tsl::CoordinationServiceAgent* agent);
namespace tfw_internal {
#if defined(TF_ENABLE_ACTIVITY_WATCHER)
// Records an activity start without checking whether the watcher is enabled.
ActivityId RecordActivityStart(std::unique_ptr<Activity> activity);
// Records <fim_middle>tsl::string title<fim_end>
|
kRemoteFunction:
return "Remote Function";
case ActivityCategory::kMisc:
return "Miscellaneous";
case ActivityCategory::kDatasetOp:
return "Dataset Op";
case ActivityCategory::kTpuOp:
return "TPU Op";
case ActivityCategory::kRendezvous:
return "Rendezvous";
}
}
// An activity to be recorded.
struct Activity {
using Attributes = absl::flat_hash_map<tsl::string, tsl::string>;
// A human readable title of the activity.
tsl::string title;
// The category of the activity.
ActivityCategory category = ActivityCategory::kMisc;
// Key/value pairs that are attached to the activity.
Attributes attributes;
Activity() = default;
Activity(
|
tsl::string title
|
, ActivityCategory category)
: title(std::move(title)), category(category) {}
Activity(tsl::string title, ActivityCategory category, Attributes attributes)
: title(std::move(title)),
category(category),
attributes(std::move(attributes)) {}
};
// Enable activity watcher to send own workers' activities to coordination
// service and also fetch all workers' activities.
void MaybeEnableMultiWorkersWatching(tsl::CoordinationServiceAgent* agent);
namespace tfw_internal {
#if defined(TF_ENABLE_ACTIVITY_WATCHER)
// Records an activity start without checking whether the watcher is enabled.
ActivityId RecordActivityStart(std::unique_ptr<Activity> activity);
// Records
|
ast_based
|
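The struct in the row above pairs a title and category with an arbitrary key/value attribute map. Below is a simplified, self-contained stand-in showing how such an activity record is built; it substitutes std::string and std::map for the TensorFlow tsl::string and absl::flat_hash_map types, and the field values are invented for illustration.

#include <map>
#include <string>
#include <utility>

enum class ActivityCategory { kMisc, kDatasetOp };

// Simplified mirror of the pattern above: a title, a category and attributes.
struct Activity {
    using Attributes = std::map<std::string, std::string>;
    std::string title;
    ActivityCategory category = ActivityCategory::kMisc;
    Attributes attributes;

    Activity(std::string title, ActivityCategory category, Attributes attributes)
        : title(std::move(title)), category(category), attributes(std::move(attributes)) {}
};

int main() {
    // Attributes travel with the activity so a watcher can render them later.
    Activity a("ReadDataset", ActivityCategory::kDatasetOp,
               {{"file", "train-00000.tfrecord"}, {"shard", "0"}});
    return a.attributes.size() == 2 ? 0 : 1;
}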
<fim_prefix> "\"pp\": %d, \"tg\": %d, \"pl\": %d, \"n_kv\": %d, \"t_pp\": %f, \"speed_pp\": %f, \"t_tg\": %f, \"speed_tg\": %f, \"t\": %f, \"speed\": %f}\n",
n_kv_max, params.n_batch, params.n_ubatch, int(params.flash_attn_type), params.is_pp_shared, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch,
pp, tg, pl, n_kv, t_pp, speed_pp, t_tg, speed_tg, t, speed
);
} else {
LOG("|%6d | %6d | %4d | %6d | %8.3f | %8.2f | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, pl, n_kv, t_pp, speed_pp, t_tg, speed_tg, t, speed);
}
}
}<fim_suffix> llama_perf_context_print(ctx);
llama_batch_free(batch);
llama_free(ctx);
llama_model_free(model);
llama_backend_free();
LOG("\n\n");
return 0;
}
<fim_middle> }
LOG("\n");<fim_end>
|
"\"pp\": %d, \"tg\": %d, \"pl\": %d, \"n_kv\": %d, \"t_pp\": %f, \"speed_pp\": %f, \"t_tg\": %f, \"speed_tg\": %f, \"t\": %f, \"speed\": %f}\n",
n_kv_max, params.n_batch, params.n_ubatch, int(params.flash_attn_type), params.is_pp_shared, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch,
pp, tg, pl, n_kv, t_pp, speed_pp, t_tg, speed_tg, t, speed
);
} else {
LOG("|%6d | %6d | %4d | %6d | %8.3f | %8.2f | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, pl, n_kv, t_pp, speed_pp, t_tg, speed_tg, t, speed);
}
}
}
|
}
LOG("\n");
|
llama_perf_context_print(ctx);
llama_batch_free(batch);
llama_free(ctx);
llama_model_free(model);
llama_backend_free();
LOG("\n\n");
return 0;
}
|
random
|
<fim_prefix>
role_map[DisplayServer::AccessibilityRole::ROLE_BUTTON] = ACCESSKIT_ROLE_BUTTON;
role_map[DisplayServer::AccessibilityRole::ROLE_LINK] = ACCESSKIT_ROLE_LINK;
role_map[DisplayServer::AccessibilityRole::ROLE_CHECK_BOX] = ACCESSKIT_ROLE_CHECK_BOX;
role_map[DisplayServer::AccessibilityRole::ROLE_RADIO_BUTTON] = ACCESSKIT_ROLE_RADIO_BUTTON;
role_map[DisplayServer::AccessibilityRole::ROLE_CHECK_BUTTON] = ACCESSKIT_ROLE_SWITCH;
role_map[DisplayServer::AccessibilityRole::ROLE_SCROLL_BAR] = ACCESSKIT_ROLE_SCROLL_BAR;
role_map[DisplayServer::AccessibilityRole::ROLE_SCROLL_VIEW] = ACCESSKIT_ROLE_SCROLL_VIEW;
role_map[DisplayServer::AccessibilityRole::ROLE_SPLITTER] = ACCESSKIT_ROLE_SPLITTER;
<fim_suffix>
role_map[DisplayServer::AccessibilityRole::ROLE_SPIN_BUTTON] = ACCESSKIT_ROLE_SPIN_BUTTON;
role_map[DisplayServer::AccessibilityRole::ROLE_PROGRESS_INDICATOR] = ACCESSKIT_ROLE_PROGRESS_INDICATOR;
role_map[DisplayServer::AccessibilityRole::ROLE_TEXT_FIELD] = ACCESSKIT_ROLE_TEXT_INPUT;
role_map[DisplayServer::AccessibilityRole::ROLE_MULTILINE_TEXT_FIELD] = ACCESSKIT_ROLE_MULTILINE_TEXT_INPUT;
role_map[DisplayServer::AccessibilityRole::ROLE_COLOR_PICKER] = ACCESSKIT_ROLE_COLOR_WELL;
role_map[DisplayServer::AccessibilityRole::ROLE_TABLE] = ACCESSKIT_ROLE_TABLE;
role_map[DisplayServer::AccessibilityRole::ROLE_CELL] = ACCESSKIT_ROLE_CELL;
role_map[DisplayServer::AccessibilityRole::ROLE_RO<fim_middle>role_map[DisplayServer::AccessibilityRole::ROLE_SLIDER] = ACCESSKIT_ROLE_SLIDER;<fim_end>
|
role_map[DisplayServer::AccessibilityRole::ROLE_BUTTON] = ACCESSKIT_ROLE_BUTTON;
role_map[DisplayServer::AccessibilityRole::ROLE_LINK] = ACCESSKIT_ROLE_LINK;
role_map[DisplayServer::AccessibilityRole::ROLE_CHECK_BOX] = ACCESSKIT_ROLE_CHECK_BOX;
role_map[DisplayServer::AccessibilityRole::ROLE_RADIO_BUTTON] = ACCESSKIT_ROLE_RADIO_BUTTON;
role_map[DisplayServer::AccessibilityRole::ROLE_CHECK_BUTTON] = ACCESSKIT_ROLE_SWITCH;
role_map[DisplayServer::AccessibilityRole::ROLE_SCROLL_BAR] = ACCESSKIT_ROLE_SCROLL_BAR;
role_map[DisplayServer::AccessibilityRole::ROLE_SCROLL_VIEW] = ACCESSKIT_ROLE_SCROLL_VIEW;
role_map[DisplayServer::AccessibilityRole::ROLE_SPLITTER] = ACCESSKIT_ROLE_SPLITTER;
|
role_map[DisplayServer::AccessibilityRole::ROLE_SLIDER] = ACCESSKIT_ROLE_SLIDER;
|
role_map[DisplayServer::AccessibilityRole::ROLE_SPIN_BUTTON] = ACCESSKIT_ROLE_SPIN_BUTTON;
role_map[DisplayServer::AccessibilityRole::ROLE_PROGRESS_INDICATOR] = ACCESSKIT_ROLE_PROGRESS_INDICATOR;
role_map[DisplayServer::AccessibilityRole::ROLE_TEXT_FIELD] = ACCESSKIT_ROLE_TEXT_INPUT;
role_map[DisplayServer::AccessibilityRole::ROLE_MULTILINE_TEXT_FIELD] = ACCESSKIT_ROLE_MULTILINE_TEXT_INPUT;
role_map[DisplayServer::AccessibilityRole::ROLE_COLOR_PICKER] = ACCESSKIT_ROLE_COLOR_WELL;
role_map[DisplayServer::AccessibilityRole::ROLE_TABLE] = ACCESSKIT_ROLE_TABLE;
role_map[DisplayServer::AccessibilityRole::ROLE_CELL] = ACCESSKIT_ROLE_CELL;
role_map[DisplayServer::AccessibilityRole::ROLE_RO
|
ast_based
|
<fim_prefix>t read_f32() {
std::float_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::string read_string(std::uint32_t len) {
std::vector<char> chars(len);
read_raw(chars.data(), len);
return std::string(chars.data(), len);
}
~my_llama_file() {
if (fp) {
std::fclose(fp);
}
}
};
static bool is_ggml_file(const char * filename) {
my_llama_file file(filename, "rb");
if (file.size < 4) {
return false;
}
std::string magic = file.read_string(4);
return magic == GGUF_MAGIC;
}
static std::string llama_escape_whitespaces(const std::string & text) {
std::ostringstream out;
<fim_suffix>
}
static void load_vocab(const char * filename, const Config * config, struct my_llama_vocab * vocab) {
if (is_ggml_file(filename)) {
LOG_INF("%s: Loading vocabulary from gguf file %s\n", __func__, filename);
struct ggml_context * ctx_data = NULL;
struct gguf_init_params params = {
/*.no_alloc = */ false,
/*.ctx = */ &ctx_data,
};
struct gguf_context * ctx = gguf_init_from_file(filename, params);
GGML_ASSERT(ctx != NULL);
const int model_idx = gguf_find_key(ctx, KV_TOKENIZER_MODEL);
GGML_ASSERT(model_idx >= 0);
std::string tokenizer_name = gguf_get_val_str(ctx, model_idx);
GG<fim_middle>for (char c : text) {
if (c == ' ') out << "\xe2\x96\x81";
else out << c;
}
return out.str();<fim_end>
|
t read_f32() {
std::float_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::string read_string(std::uint32_t len) {
std::vector<char> chars(len);
read_raw(chars.data(), len);
return std::string(chars.data(), len);
}
~my_llama_file() {
if (fp) {
std::fclose(fp);
}
}
};
static bool is_ggml_file(const char * filename) {
my_llama_file file(filename, "rb");
if (file.size < 4) {
return false;
}
std::string magic = file.read_string(4);
return magic == GGUF_MAGIC;
}
static std::string llama_escape_whitespaces(const std::string & text) {
std::ostringstream out;
|
for (char c : text) {
if (c == ' ') out << "\xe2\x96\x81";
else out << c;
}
return out.str();
|
}
static void load_vocab(const char * filename, const Config * config, struct my_llama_vocab * vocab) {
if (is_ggml_file(filename)) {
LOG_INF("%s: Loading vocabulary from gguf file %s\n", __func__, filename);
struct ggml_context * ctx_data = NULL;
struct gguf_init_params params = {
/*.no_alloc = */ false,
/*.ctx = */ &ctx_data,
};
struct gguf_context * ctx = gguf_init_from_file(filename, params);
GGML_ASSERT(ctx != NULL);
const int model_idx = gguf_find_key(ctx, KV_TOKENIZER_MODEL);
GGML_ASSERT(model_idx >= 0);
std::string tokenizer_name = gguf_get_val_str(ctx, model_idx);
GG
|
ast_based
|
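The middle field above replaces every space with the UTF-8 bytes "\xe2\x96\x81" (U+2581, the block character SentencePiece-style tokenizers use to mark word boundaries). A self-contained check of that behaviour, mirroring the sample rather than calling into llama.cpp:

#include <sstream>
#include <string>

// Same transformation as llama_escape_whitespaces in the sample above:
// every space becomes U+2581 ("\xe2\x96\x81"); everything else is copied.
static std::string escape_whitespaces(const std::string &text) {
    std::ostringstream out;
    for (char c : text) {
        if (c == ' ') out << "\xe2\x96\x81";
        else out << c;
    }
    return out.str();
}

int main() {
    return escape_whitespaces("hello world") == "hello\xe2\x96\x81world" ? 0 : 1;
}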
<fim_prefix> }
}
/**
* Return the length of the output text string, as UTF8, assuming
* liberally two spacing marks after each word (as paragraphs end with two
* newlines), and assuming a single character reject marker for each rejected
* character.
* Also return the number of recognized blobs in blob_count.
*/
int TessBaseAPI::TextLength(int *blob_count) const {
if (tesseract_ == nullptr || page_res_ == nullptr) {
return 0;
}
PAGE_RES_IT page_res_it(page_res_);
int total_length = 2;
int total_blobs = 0;
// Iterate over the data structures to extract the recognition result.
for (page_res_it.restart_page(); page_res_it.word() != nullptr; page_res_it.forward()) {<fim_suffix> if (choice != nullptr) {
total_blobs += choice->length() + 2;
total_length += choice->unichar_string().length() + 2;
for (int i = 0; i < word->reject_map.length(); ++i) {
if (word->reject_map[i].rejected()) {
++total_length;
}
}
}
}
if (blob_count != nullptr) {
*blob_count = total_blobs;
}
return total_length;
}
#ifndef DISABLED_LEGACY_ENGINE
/**
* Estimates the Orientation And Script of the image.
* Returns true if the image was processed successfully.
*/
bool TessBaseAPI::DetectOS(OSResults *osr) {
if (tesseract_ == nullptr) {
return false;
}
ClearResults();<fim_middle> WERD_RES *word = page_res_it.word();
WERD_CHOICE *choice = word->best_choice;<fim_end>
|
}
}
/**
* Return the length of the output text string, as UTF8, assuming
* liberally two spacing marks after each word (as paragraphs end with two
* newlines), and assuming a single character reject marker for each rejected
* character.
* Also return the number of recognized blobs in blob_count.
*/
int TessBaseAPI::TextLength(int *blob_count) const {
if (tesseract_ == nullptr || page_res_ == nullptr) {
return 0;
}
PAGE_RES_IT page_res_it(page_res_);
int total_length = 2;
int total_blobs = 0;
// Iterate over the data structures to extract the recognition result.
for (page_res_it.restart_page(); page_res_it.word() != nullptr; page_res_it.forward()) {
|
WERD_RES *word = page_res_it.word();
WERD_CHOICE *choice = word->best_choice;
|
if (choice != nullptr) {
total_blobs += choice->length() + 2;
total_length += choice->unichar_string().length() + 2;
for (int i = 0; i < word->reject_map.length(); ++i) {
if (word->reject_map[i].rejected()) {
++total_length;
}
}
}
}
if (blob_count != nullptr) {
*blob_count = total_blobs;
}
return total_length;
}
#ifndef DISABLED_LEGACY_ENGINE
/**
* Estimates the Orientation And Script of the image.
* Returns true if the image was processed successfully.
*/
bool TessBaseAPI::DetectOS(OSResults *osr) {
if (tesseract_ == nullptr) {
return false;
}
ClearResults();
|
random
|
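Working through the loop above for a single recognized word whose best choice is "cat" with one rejected character: total_blobs grows by 3 + 2 = 5 and total_length by 3 + 2 + 1 = 6, on top of the initial total_length of 2, so the estimate deliberately over-counts spacing and reject markers rather than under-allocating the output buffer.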
<fim_prefix> word_res->word->set_text(wordstr);
// Check to see if text matches wordstr.
int w = 0;
int t;
for (t = 0; text[t] != '\0'; ++t) {
if (text[t] == '\n' || text[t] == ' ') {
continue;
}
while (wordstr[w] == ' ') {
++w;
}
if (text[t] != wordstr[w]) {
break;
}
++w;
}
if (text[t] != '\0' || wordstr[w] != '\0') {
// No match.
delete page_res_;
std::vector<TBOX> boxes;
page_res_ = tesseract_->SetupApplyBoxes(boxes, block_list_);
tesseract_->ReSegmentByClassification(page_res_);
tesseract_->TidyUp(page_res_);<fim_suffix> if (pr_it.word() == nullptr) {
success = false;
} else {
word_res = pr_it.word();
}
} else {
word_res->BestChoiceToCorrectText();
}
if (success) {
tesseract_->EnableLearning = true;
tesseract_->LearnWord(nullptr, word_res);
}
} else {
success = false;
}
} else {
success = false;
}
SetPageSegMode(current_psm);
return success;
}
#endif // ndef DISABLED_LEGACY_ENGINE
/**
* Free up recognition results and any stored image data, without actually
* freeing any recognition data that would be time-consuming to reload.<fim_middle> PAGE_RES_IT pr_it(page_res_);<fim_end>
|
word_res->word->set_text(wordstr);
// Check to see if text matches wordstr.
int w = 0;
int t;
for (t = 0; text[t] != '\0'; ++t) {
if (text[t] == '\n' || text[t] == ' ') {
continue;
}
while (wordstr[w] == ' ') {
++w;
}
if (text[t] != wordstr[w]) {
break;
}
++w;
}
if (text[t] != '\0' || wordstr[w] != '\0') {
// No match.
delete page_res_;
std::vector<TBOX> boxes;
page_res_ = tesseract_->SetupApplyBoxes(boxes, block_list_);
tesseract_->ReSegmentByClassification(page_res_);
tesseract_->TidyUp(page_res_);
|
PAGE_RES_IT pr_it(page_res_);
|
if (pr_it.word() == nullptr) {
success = false;
} else {
word_res = pr_it.word();
}
} else {
word_res->BestChoiceToCorrectText();
}
if (success) {
tesseract_->EnableLearning = true;
tesseract_->LearnWord(nullptr, word_res);
}
} else {
success = false;
}
} else {
success = false;
}
SetPageSegMode(current_psm);
return success;
}
#endif // ndef DISABLED_LEGACY_ENGINE
/**
* Free up recognition results and any stored image data, without actually
* freeing any recognition data that would be time-consuming to reload.
|
random
|
<fim_prefix> astcenc_context *context = nullptr;
const unsigned int thread_count = 1;
status = astcenc_context_alloc(&config, thread_count, &context);
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Context allocation failed: %s.", astcenc_get_error_string(status)));
const Image::Format target_format = is_hdr ? Image::FORMAT_RGBAH : Image::FORMAT_RGBA8;
const bool has_mipmaps = r_img->has_mipmaps();
int width = r_img->get_width();
int height = r_img->get_height();
const int64_t dest_size = Image::get_image_data_size(width, height, target_format, has_mipmaps);
Vector<uint8_t> dest_data;
dest_data.resize(dest_size);
uint8_t *dest_write = dest_data.ptrw();<fim_suffix>
for (int i = 0; i < mip_count + 1; i++) {
const int64_t src_ofs = Image::get_image_mipmap_offset(width, height, src_format, i);
const uint8_t *mip_data = &src_data[src_ofs];
int64_t src_size;
if (i == mip_count) {
src_size = r_img->get_data_size() - src_ofs;
} else {
src_size = Image::get_image_mipmap_offset(width, height, src_format, i + 1) - src_ofs;
}
int dst_mip_w, dst_mip_h;
const int64_t dst_ofs = Image::get_image_mipmap_offset_and_dimensions(width, height, target_format, i, dst_mip_w, dst_mip_h);
// Ensure that mip offset is a multiple of 8 (etcpak expects uint64_t pointer).
ERR_FAIL_COND(dst_ofs % 8 != 0);<fim_middle>
// Decompress image.
const int mip_count = has_mipmaps ? Image::get_image_required_mipmaps(width, height, target_format) : 0;
const uint8_t *src_data = r_img->ptr();<fim_end>
|
astcenc_context *context = nullptr;
const unsigned int thread_count = 1;
status = astcenc_context_alloc(&config, thread_count, &context);
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Context allocation failed: %s.", astcenc_get_error_string(status)));
const Image::Format target_format = is_hdr ? Image::FORMAT_RGBAH : Image::FORMAT_RGBA8;
const bool has_mipmaps = r_img->has_mipmaps();
int width = r_img->get_width();
int height = r_img->get_height();
const int64_t dest_size = Image::get_image_data_size(width, height, target_format, has_mipmaps);
Vector<uint8_t> dest_data;
dest_data.resize(dest_size);
uint8_t *dest_write = dest_data.ptrw();
|
// Decompress image.
const int mip_count = has_mipmaps ? Image::get_image_required_mipmaps(width, height, target_format) : 0;
const uint8_t *src_data = r_img->ptr();
|
for (int i = 0; i < mip_count + 1; i++) {
const int64_t src_ofs = Image::get_image_mipmap_offset(width, height, src_format, i);
const uint8_t *mip_data = &src_data[src_ofs];
int64_t src_size;
if (i == mip_count) {
src_size = r_img->get_data_size() - src_ofs;
} else {
src_size = Image::get_image_mipmap_offset(width, height, src_format, i + 1) - src_ofs;
}
int dst_mip_w, dst_mip_h;
const int64_t dst_ofs = Image::get_image_mipmap_offset_and_dimensions(width, height, target_format, i, dst_mip_w, dst_mip_h);
// Ensure that mip offset is a multiple of 8 (etcpak expects uint64_t pointer).
ERR_FAIL_COND(dst_ofs % 8 != 0);
|
random
|
<fim_prefix> training font and returns it in fontname, by cutting
// it out based on the expectation that the filename is of the form:
// /path/to/dir/[lang].[fontname].exp[num]
// The [lang], [fontname] and [num] fields should not have '.' characters.
// If the global parameter classify_font_name is set, its value is used instead.
static void ExtractFontName(const char* filename, std::string* fontname) {
*fontname = classify_font_name;
if (*fontname == kUnknownFontName) {
// filename is expected to be of the form [lang].[fontname].exp[num]
// The [lang], [fontname] and [num] fields should not have '.' characters.
const char *basename = strrchr(filename, '/');
const char *firstdot = <fim_suffix>;
const char *lastdot = strrchr(filename, '.');
if (firstdot != lastdot && firstdot != nullptr && lastdot != nullptr) {
++firstdot;
*fontname = firstdot;
fontname->resize(lastdot - firstdot);
}
}
}
#endif
/* Add all available languages recursively.
*/
static void addAvailableLanguages(const std::string &datadir,
std::vector<std::string> *langs) {
for (const auto& entry :
std::filesystem::recursive_directory_iterator(datadir,
std::filesystem::directory_options::follow_directory_symlink |
std::filesystem::directory_options::skip_permission_denied)) {
auto path = entry.path().lexically_relative(d<fim_middle>strchr(basename ? basename : filename, '.')<fim_end>
|
training font and returns it in fontname, by cutting
// it out based on the expectation that the filename is of the form:
// /path/to/dir/[lang].[fontname].exp[num]
// The [lang], [fontname] and [num] fields should not have '.' characters.
// If the global parameter classify_font_name is set, its value is used instead.
static void ExtractFontName(const char* filename, std::string* fontname) {
*fontname = classify_font_name;
if (*fontname == kUnknownFontName) {
// filename is expected to be of the form [lang].[fontname].exp[num]
// The [lang], [fontname] and [num] fields should not have '.' characters.
const char *basename = strrchr(filename, '/');
const char *firstdot =
|
strchr(basename ? basename : filename, '.')
|
;
const char *lastdot = strrchr(filename, '.');
if (firstdot != lastdot && firstdot != nullptr && lastdot != nullptr) {
++firstdot;
*fontname = firstdot;
fontname->resize(lastdot - firstdot);
}
}
}
#endif
/* Add all available languages recursively.
*/
static void addAvailableLanguages(const std::string &datadir,
std::vector<std::string> *langs) {
for (const auto& entry :
std::filesystem::recursive_directory_iterator(datadir,
std::filesystem::directory_options::follow_directory_symlink |
std::filesystem::directory_options::skip_permission_denied)) {
auto path = entry.path().lexically_relative(d
|
ast_based
|
<fim_prefix>));
Variant variant = ProjectSettings::get_singleton()->get_setting("not_existing_setting");
CHECK_EQ(variant.get_type(), Variant::NIL);
variant = ProjectSettings::get_singleton()->get_setting("not_existing_setting", "my_nice_default_value");
CHECK_EQ(variant.get_type(), Variant::STRING);
String name = variant;
CHECK_EQ(name, "my_nice_default_value");
CHECK_FALSE(ProjectSettings::get_singleton()->has_setting("not_existing_setting"));
}
TEST_CASE("[ProjectSettings] Set value should be returned when retrieved") {
CHECK_FALSE(ProjectSettings::get_singleton()->has_setting("my_custom_setting"));
Variant variant = ProjectSettings::get_singleton()->get_setting("my_custom_setting");
<fim_suffix>;
ProjectSettings::get_singleton()->set_setting("my_custom_setting", true);
CHECK(ProjectSettings::get_singleton()->has_setting("my_custom_setting"));
variant = ProjectSettings::get_singleton()->get_setting("my_custom_setting");
CHECK_EQ(variant.get_type(), Variant::BOOL);
bool value = variant;
CHECK_EQ(true, value);
CHECK(ProjectSettings::get_singleton()->has_setting("my_custom_setting"));
}
TEST_CASE("[ProjectSettings] localize_path") {
String old_resource_path = TestProjectSettingsInternalsAccessor::resource_path();
TestProjectSettingsInternalsAccessor::resource_path() = DirAccess::create(DirAccess::ACCESS_FILESYSTEM)->get_current_dir();
String root_path = ProjectSettings::<fim_middle>CHECK_EQ(variant.get_type(), Variant::NIL)<fim_end>
|
));
Variant variant = ProjectSettings::get_singleton()->get_setting("not_existing_setting");
CHECK_EQ(variant.get_type(), Variant::NIL);
variant = ProjectSettings::get_singleton()->get_setting("not_existing_setting", "my_nice_default_value");
CHECK_EQ(variant.get_type(), Variant::STRING);
String name = variant;
CHECK_EQ(name, "my_nice_default_value");
CHECK_FALSE(ProjectSettings::get_singleton()->has_setting("not_existing_setting"));
}
TEST_CASE("[ProjectSettings] Set value should be returned when retrieved") {
CHECK_FALSE(ProjectSettings::get_singleton()->has_setting("my_custom_setting"));
Variant variant = ProjectSettings::get_singleton()->get_setting("my_custom_setting");
|
CHECK_EQ(variant.get_type(), Variant::NIL)
|
;
ProjectSettings::get_singleton()->set_setting("my_custom_setting", true);
CHECK(ProjectSettings::get_singleton()->has_setting("my_custom_setting"));
variant = ProjectSettings::get_singleton()->get_setting("my_custom_setting");
CHECK_EQ(variant.get_type(), Variant::BOOL);
bool value = variant;
CHECK_EQ(true, value);
CHECK(ProjectSettings::get_singleton()->has_setting("my_custom_setting"));
}
TEST_CASE("[ProjectSettings] localize_path") {
String old_resource_path = TestProjectSettingsInternalsAccessor::resource_path();
TestProjectSettingsInternalsAccessor::resource_path() = DirAccess::create(DirAccess::ACCESS_FILESYSTEM)->get_current_dir();
String root_path = ProjectSettings::
|
ast_based
|
<fim_prefix>lk.%d.ffn_up.weight"
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt'
#define LLAMA_FILE_VERSION_GGJT_V3 3
#define TOKENIZER_NAME "llama"
#define UNKNOWN_TOKEN_ID 0
#define BOS_TOKEN_ID 1
#define EOS_TOKEN_ID 2
//////////////////////////////////////// llama2.c model structs and functions to load models, alloc memory etc.
typedef struct {
int dim; // transformer dimension
int hidden_dim; // for ffn layers
int n_layers; // number of layers
int n_heads; // number of query heads
int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery)
<fim_suffix> // vocabulary size, usually 256 (byte-level)
int seq_len; // max sequence length
} Config;
struct TransformerWeights {
// token embedding table
std::vector<float> token_embedding_table; // (vocab_size, dim)
// weights for rmsnorms
std::vector<float> rms_att_weight; // (layer, dim) rmsnorm weights
std::vector<float> rms_ffn_weight; // (layer, dim)
// weights for matmuls
std::vector<float> wq; // (layer, dim, dim)
std::vector<float> wk; // (layer, dim, dim)
std::vector<float> wv; // (layer, dim, dim)
std::vector<float> wo; // (layer, dim, dim)
// weights for ffn
std::vector<float> w1; // (layer, hidden_dim, dim)
std::vector<float> w2<fim_middle>int vocab_size;<fim_end>
|
lk.%d.ffn_up.weight"
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt'
#define LLAMA_FILE_VERSION_GGJT_V3 3
#define TOKENIZER_NAME "llama"
#define UNKNOWN_TOKEN_ID 0
#define BOS_TOKEN_ID 1
#define EOS_TOKEN_ID 2
//////////////////////////////////////// llama2.c model structs and functions to load models, alloc memory etc.
typedef struct {
int dim; // transformer dimension
int hidden_dim; // for ffn layers
int n_layers; // number of layers
int n_heads; // number of query heads
int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery)
|
int vocab_size;
|
// vocabulary size, usually 256 (byte-level)
int seq_len; // max sequence length
} Config;
struct TransformerWeights {
// token embedding table
std::vector<float> token_embedding_table; // (vocab_size, dim)
// weights for rmsnorms
std::vector<float> rms_att_weight; // (layer, dim) rmsnorm weights
std::vector<float> rms_ffn_weight; // (layer, dim)
// weights for matmuls
std::vector<float> wq; // (layer, dim, dim)
std::vector<float> wk; // (layer, dim, dim)
std::vector<float> wv; // (layer, dim, dim)
std::vector<float> wo; // (layer, dim, dim)
// weights for ffn
std::vector<float> w1; // (layer, hidden_dim, dim)
std::vector<float> w2
|
ast_based
|
<fim_prefix>/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */<fim_suffix>/**************************************************************************/
#pragma once
#include "core/config/project_settings.h"
#include "core/io/dir_access.h"
#include "core/variant/variant.h"
#include "tests/test_macros.h"
class TestProjectSettingsInternalsAccessor {
public:
static String &resource_path() {
return ProjectSettings::get_singleton()->resource_path;
}
};
namespace TestProjectSettings {
TEST_CASE("[ProjectSettings] Get existing setting") {
CHECK(ProjectSettings::get_singleton()->has_setting("application/run/main_scene"));
Variant variant = ProjectSettings::get_singleton()->get_setting("application/run/main_scene");<fim_middle>/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */<fim_end>
|
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
|
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
|
/**************************************************************************/
#pragma once
#include "core/config/project_settings.h"
#include "core/io/dir_access.h"
#include "core/variant/variant.h"
#include "tests/test_macros.h"
class TestProjectSettingsInternalsAccessor {
public:
static String &resource_path() {
return ProjectSettings::get_singleton()->resource_path;
}
};
namespace TestProjectSettings {
TEST_CASE("[ProjectSettings] Get existing setting") {
CHECK(ProjectSettings::get_singleton()->has_setting("application/run/main_scene"));
Variant variant = ProjectSettings::get_singleton()->get_setting("application/run/main_scene");
|
random
|
<fim_prefix> }
if (wlen > 0) {
word_lengths.push_back(wlen);
total += wlen;
}
prev = words[j];
}
if (total < t.length()) {
word_lengths.push_back(t.length() - total);
}
accesskit_node_set_word_lengths(ae->node, word_lengths.size(), word_lengths.ptr());
// Char widths and positions.
Vector<float> char_positions;
Vector<float> char_widths;
char_positions.resize_initialized(t.length());
float *positions_ptr = char_positions.ptrw();
char_widths.resize_initialized(t.length());
float *widths_ptr = char_widths.ptrw();
float size_x = 0.0;
for (int j = gl_index; j < gl_count; j += gl[j].count) {
if (gl[j].start >= ae->run.y) {
gl_index = j;
break;<fim_suffix> }
int chars = gl[j].end - gl[j].start;
float adv_per_char = advance / (float)chars;
for (int k = 0; k < chars; k++) {
int index = gl[j].start + k - ae->run.x;
ERR_CONTINUE(index < 0 || index >= t.length());
positions_ptr[index] = size_x + adv_per_char * k;
widths_ptr[index] = adv_per_char;
}
size_x += advance * gl[j].repeat;
}
positions_ptr[t.length() - 1] = size_x;
widths_ptr[t.length() - 1] = 1.0;
accesskit_node_set_character_positions(ae->node, char_positions.size(), char_positions.ptr());
accesskit_node_set_character_widths(ae->node, char_widths.size(), char_widths.ptr());
RID font_rid = TS->shaped_get_run_font_rid(p_shaped_text, i);<fim_middle> }
float advance = 0.0; // Graphame advance.
for (int k = 0; k < gl[j].count; k++) {
advance += gl[j + k].advance;<fim_end>
|
}
if (wlen > 0) {
word_lengths.push_back(wlen);
total += wlen;
}
prev = words[j];
}
if (total < t.length()) {
word_lengths.push_back(t.length() - total);
}
accesskit_node_set_word_lengths(ae->node, word_lengths.size(), word_lengths.ptr());
// Char widths and positions.
Vector<float> char_positions;
Vector<float> char_widths;
char_positions.resize_initialized(t.length());
float *positions_ptr = char_positions.ptrw();
char_widths.resize_initialized(t.length());
float *widths_ptr = char_widths.ptrw();
float size_x = 0.0;
for (int j = gl_index; j < gl_count; j += gl[j].count) {
if (gl[j].start >= ae->run.y) {
gl_index = j;
break;
|
}
float advance = 0.0; // Graphame advance.
for (int k = 0; k < gl[j].count; k++) {
advance += gl[j + k].advance;
|
}
int chars = gl[j].end - gl[j].start;
float adv_per_char = advance / (float)chars;
for (int k = 0; k < chars; k++) {
int index = gl[j].start + k - ae->run.x;
ERR_CONTINUE(index < 0 || index >= t.length());
positions_ptr[index] = size_x + adv_per_char * k;
widths_ptr[index] = adv_per_char;
}
size_x += advance * gl[j].repeat;
}
positions_ptr[t.length() - 1] = size_x;
widths_ptr[t.length() - 1] = 1.0;
accesskit_node_set_character_positions(ae->node, char_positions.size(), char_positions.ptr());
accesskit_node_set_character_widths(ae->node, char_widths.size(), char_widths.ptr());
RID font_rid = TS->shaped_get_run_font_rid(p_shaped_text, i);
|
random
|
<fim_prefix> },
};
TEST_ASSERT(!checkCrcAgainstGondenSamples(hex::crypt::crc16, golden_samples));
TEST_SUCCESS();
};
TEST_SEQUENCE("CRC16Random") {
<fim_suffix>;
TEST_SUCCESS();
};
TEST_SEQUENCE("CRC8") {
std::array golden_samples = {
// source: Sunshine's Homepage - Online CRC Calculator Javascript [http://www.sunshine2k.de/coding/javascript/crc/crc_js.html]
CrcCheck {"CRC-8-0-check", 8, 0xD5, 0xff, 0x00, true, true, 0x7f, { 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39 } <fim_middle>TEST_ASSERT(!checkCrcAgainstRandomData(hex::crypt::crc16, 16))<fim_end>
|
},
};
TEST_ASSERT(!checkCrcAgainstGondenSamples(hex::crypt::crc16, golden_samples));
TEST_SUCCESS();
};
TEST_SEQUENCE("CRC16Random") {
|
TEST_ASSERT(!checkCrcAgainstRandomData(hex::crypt::crc16, 16))
|
;
TEST_SUCCESS();
};
TEST_SEQUENCE("CRC8") {
std::array golden_samples = {
// source: Sunshine's Homepage - Online CRC Calculator Javascript [http://www.sunshine2k.de/coding/javascript/crc/crc_js.html]
CrcCheck {"CRC-8-0-check", 8, 0xD5, 0xff, 0x00, true, true, 0x7f, { 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39 }
|
ast_based
|
<fim_prefix>
if (px >= limit && px < (get_size().width)) {
const Color color = get_theme_color(SNAME("accent_color"), EditorStringName(Editor));
play_position->draw_line(Point2(px, 0), Point2(px, h), color, Math::round(2 * EDSCALE));
}
}
void AnimationBezierTrackEdit::set_play_position(real_t p_pos) {
play_position_pos = p_pos;
play_position->queue_redraw();
}
void AnimationBezierTrackEdit::update_play_position() {
play_position->queue_redraw();
}
void AnimationBezierTrackEdit::set_root(Node *p_root) {
root = p_root;
}
void AnimationBezierTrackEdit::set_filtered(bool p_filtered) {
is_filtered = p_filtered;
if (animation.is_null()) {<fim_suffix> if (is_filtered) {
if (root && root->has_node(base_path)) {
Node *node = root->get_node(base_path);
if (!node || !EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (animation->track_get_type(i) != Animation::TrackType::TYPE_BEZIER) {
continue;
}
base_path = String(animation->track_get_path(i));
if (root && root->has_node(base_path)) {
node = root->get_node(base_path);
if (!node) {
continue; // No node, no filter.
}
if (!EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) {<fim_middle> return;
}
String base_path = String(animation->track_get_path(selected_track));<fim_end>
|
if (px >= limit && px < (get_size().width)) {
const Color color = get_theme_color(SNAME("accent_color"), EditorStringName(Editor));
play_position->draw_line(Point2(px, 0), Point2(px, h), color, Math::round(2 * EDSCALE));
}
}
void AnimationBezierTrackEdit::set_play_position(real_t p_pos) {
play_position_pos = p_pos;
play_position->queue_redraw();
}
void AnimationBezierTrackEdit::update_play_position() {
play_position->queue_redraw();
}
void AnimationBezierTrackEdit::set_root(Node *p_root) {
root = p_root;
}
void AnimationBezierTrackEdit::set_filtered(bool p_filtered) {
is_filtered = p_filtered;
if (animation.is_null()) {
|
return;
}
String base_path = String(animation->track_get_path(selected_track));
|
if (is_filtered) {
if (root && root->has_node(base_path)) {
Node *node = root->get_node(base_path);
if (!node || !EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (animation->track_get_type(i) != Animation::TrackType::TYPE_BEZIER) {
continue;
}
base_path = String(animation->track_get_path(i));
if (root && root->has_node(base_path)) {
node = root->get_node(base_path);
if (!node) {
continue; // No node, no filter.
}
if (!EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) {
|
random
|
<fim_prefix> StringName name = p_name;
if (name == StringName()) {
name = animation;
}
ERR_FAIL_COND_MSG(frames.is_null(), vformat("There is no animation with name '%s'.", name));
ERR_FAIL_COND_MSG(!frames->get_animation_names().has(name), vformat("There is no animation with name '%s'.", name));
if (frames->get_frame_count(name) == 0) {
return;
}
playing = true;
custom_speed_scale = p_custom_scale;
if (name != animation) {
animation = name;
int end_frame = MAX(0, frames->get_frame_count(animation) - 1);
if (p_from_end) {
set_frame_and_progress(end_frame, 1.0);
} else {
set_frame_and_progress(0, 0.0);<fim_suffix> int end_frame = MAX(0, frames->get_frame_count(animation) - 1);
bool is_backward = std::signbit(speed_scale * custom_speed_scale);
if (p_from_end && is_backward && frame == 0 && frame_progress <= 0.0) {
set_frame_and_progress(end_frame, 1.0);
} else if (!p_from_end && !is_backward && frame == end_frame && frame_progress >= 1.0) {
set_frame_and_progress(0, 0.0);
}
}
set_process_internal(true);
notify_property_list_changed();
queue_redraw();
}
void AnimatedSprite2D::play_backwards(const StringName &p_name) {
play(p_name, -1, true);
}
void AnimatedSprite2D::_stop_internal(bool p_reset) {
playing = false;
if (p_reset) {
custom_speed_scale = 1.0;<fim_middle> }
emit_signal(SceneStringName(animation_changed));
} else {<fim_end>
|
StringName name = p_name;
if (name == StringName()) {
name = animation;
}
ERR_FAIL_COND_MSG(frames.is_null(), vformat("There is no animation with name '%s'.", name));
ERR_FAIL_COND_MSG(!frames->get_animation_names().has(name), vformat("There is no animation with name '%s'.", name));
if (frames->get_frame_count(name) == 0) {
return;
}
playing = true;
custom_speed_scale = p_custom_scale;
if (name != animation) {
animation = name;
int end_frame = MAX(0, frames->get_frame_count(animation) - 1);
if (p_from_end) {
set_frame_and_progress(end_frame, 1.0);
} else {
set_frame_and_progress(0, 0.0);
|
}
emit_signal(SceneStringName(animation_changed));
} else {
|
int end_frame = MAX(0, frames->get_frame_count(animation) - 1);
bool is_backward = std::signbit(speed_scale * custom_speed_scale);
if (p_from_end && is_backward && frame == 0 && frame_progress <= 0.0) {
set_frame_and_progress(end_frame, 1.0);
} else if (!p_from_end && !is_backward && frame == end_frame && frame_progress >= 1.0) {
set_frame_and_progress(0, 0.0);
}
}
set_process_internal(true);
notify_property_list_changed();
queue_redraw();
}
void AnimatedSprite2D::play_backwards(const StringName &p_name) {
play(p_name, -1, true);
}
void AnimatedSprite2D::_stop_internal(bool p_reset) {
playing = false;
if (p_reset) {
custom_speed_scale = 1.0;
|
random
|
<fim_prefix>page_res_it.word();
WERD_CHOICE *choice = word->best_choice;
if (choice != nullptr) {
total_blobs += choice->length() + 2;
total_length += choice->unichar_string().length() + 2;
for (int i = 0; i < word->reject_map.length(); ++i) {
if (word->reject_map[i].rejected()) {
++total_length;
}
}
}
}
if (blob_count != nullptr) {
*blob_count = total_blobs;
}
return total_length;
}
#ifndef DISABLED_LEGACY_ENGINE
/**
* Estimates the Orientation And Script of the image.
* Returns true if the image was processed successfully.
*/
bool TessBaseAPI::DetectOS(OSResults *osr) {
if (tesseract_ == nullptr) {
return false;
}
<fim_suffix>
if (tesseract_->pix_binary() == nullptr && !Threshold(&tesseract_->mutable_pix_binary()->pix_)) {
return false;
}
if (input_file_.empty()) {
input_file_ = kInputFile;
}
return orientation_and_script_detection(input_file_.c_str(), osr, tesseract_) > 0;
}
#endif // #ifndef DISABLED_LEGACY_ENGINE
void TessBaseAPI::set_min_orientation_margin(double margin) {
tesseract_->min_orientation_margin.set_value(margin);
}
/**
* Return text orientation of each block as determined in an earlier page layout
* analysis operation. Orientation is returned as the number of ccw 90-degree
* rotations (in [0..3]) required to make the text in the block upright
* (readable). Note that thi<fim_middle>ClearResults();<fim_end>
|
page_res_it.word();
WERD_CHOICE *choice = word->best_choice;
if (choice != nullptr) {
total_blobs += choice->length() + 2;
total_length += choice->unichar_string().length() + 2;
for (int i = 0; i < word->reject_map.length(); ++i) {
if (word->reject_map[i].rejected()) {
++total_length;
}
}
}
}
if (blob_count != nullptr) {
*blob_count = total_blobs;
}
return total_length;
}
#ifndef DISABLED_LEGACY_ENGINE
/**
* Estimates the Orientation And Script of the image.
* Returns true if the image was processed successfully.
*/
bool TessBaseAPI::DetectOS(OSResults *osr) {
if (tesseract_ == nullptr) {
return false;
}
|
ClearResults();
|
if (tesseract_->pix_binary() == nullptr && !Threshold(&tesseract_->mutable_pix_binary()->pix_)) {
return false;
}
if (input_file_.empty()) {
input_file_ = kInputFile;
}
return orientation_and_script_detection(input_file_.c_str(), osr, tesseract_) > 0;
}
#endif // #ifndef DISABLED_LEGACY_ENGINE
void TessBaseAPI::set_min_orientation_margin(double margin) {
tesseract_->min_orientation_margin.set_value(margin);
}
/**
* Return text orientation of each block as determined in an earlier page layout
* analysis operation. Orientation is returned as the number of ccw 90-degree
* rotations (in [0..3]) required to make the text in the block upright
* (readable). Note that thi
|
ast_based
|
<fim_prefix> return CURSOR_BDIAGSIZE;
} else if (abs(scaling_selection_handles.x) == 1) {
return CURSOR_HSIZE;
} else if (abs(scaling_selection_handles.y) == 1) {
return CURSOR_VSIZE;
}
}
// Hovering the scaling box
const Vector2i rel_pos = p_pos - selection_rect.position;
if (selection_handles_rect.has_point(p_pos)) {
if ((rel_pos.x < 0 && rel_pos.y < 0) || (rel_pos.x > selection_rect.size.width && rel_pos.y > selection_rect.size.height)) {
return CURSOR_FDIAGSIZE;
} else if ((rel_pos.x < 0 && rel_pos.y > selection_rect.size.height) || (rel_pos.x > selection_rect.size.width && rel_pos.y < 0)) {<fim_suffix> } else if (rel_pos.x < 0 || rel_pos.x > selection_rect.size.width) {
return CURSOR_HSIZE;
} else if (rel_pos.y < 0 || rel_pos.y > selection_rect.size.height) {
return CURSOR_VSIZE;
}
return CURSOR_MOVE;
}
return get_default_cursor_shape();
}
void AnimationBezierTrackEdit::set_timeline(AnimationTimelineEdit *p_timeline) {
timeline = p_timeline;
timeline->connect("zoom_changed", callable_mp(this, &AnimationBezierTrackEdit::_zoom_changed));
timeline->connect("name_limit_changed", callable_mp(this, &AnimationBezierTrackEdit::_zoom_changed));
}
void AnimationBezierTrackEdit::set_editor(AnimationTrackEditor *p_editor) {
editor = p_editor;<fim_middle> return CURSOR_BDIAGSIZE;<fim_end>
|
return CURSOR_BDIAGSIZE;
} else if (abs(scaling_selection_handles.x) == 1) {
return CURSOR_HSIZE;
} else if (abs(scaling_selection_handles.y) == 1) {
return CURSOR_VSIZE;
}
}
// Hovering the scaling box
const Vector2i rel_pos = p_pos - selection_rect.position;
if (selection_handles_rect.has_point(p_pos)) {
if ((rel_pos.x < 0 && rel_pos.y < 0) || (rel_pos.x > selection_rect.size.width && rel_pos.y > selection_rect.size.height)) {
return CURSOR_FDIAGSIZE;
} else if ((rel_pos.x < 0 && rel_pos.y > selection_rect.size.height) || (rel_pos.x > selection_rect.size.width && rel_pos.y < 0)) {
|
return CURSOR_BDIAGSIZE;
|
} else if (rel_pos.x < 0 || rel_pos.x > selection_rect.size.width) {
return CURSOR_HSIZE;
} else if (rel_pos.y < 0 || rel_pos.y > selection_rect.size.height) {
return CURSOR_VSIZE;
}
return CURSOR_MOVE;
}
return get_default_cursor_shape();
}
void AnimationBezierTrackEdit::set_timeline(AnimationTimelineEdit *p_timeline) {
timeline = p_timeline;
timeline->connect("zoom_changed", callable_mp(this, &AnimationBezierTrackEdit::_zoom_changed));
timeline->connect("name_limit_changed", callable_mp(this, &AnimationBezierTrackEdit::_zoom_changed));
}
void AnimationBezierTrackEdit::set_editor(AnimationTrackEditor *p_editor) {
editor = p_editor;
|
random
|
<fim_prefix>;
RID accessibility_create_sub_element(const RID &p_parent_rid, DisplayServer::AccessibilityRole p_role, int p_insert_pos = -1) override;
virtual RID accessibility_create_sub_text_edit_elements(const RID &p_parent_rid, const RID &p_shaped_text, float p_min_height, int p_insert_pos = -1) override;
bool accessibility_has_element(const RID &p_id) const override;
void accessibility_free_element(const RID &p_id) override;
void accessibility_element_set_meta(const RID &p_id, const Variant &p_meta) override;
Variant accessibility_element_get_meta(const RID &p_id) const override;
void accessibility_update_if_active(const Callable &p_callable) override;
void accessibility_update_set_focus(<fim_suffix>) override;
RID accessibility_get_window_root(DisplayServer::WindowID p_window_id) const override;
void accessibility_set_window_rect(DisplayServer::WindowID p_window_id, const Rect2 &p_rect_out, const Rect2 &p_rect_in) override;
void accessibility_set_window_focused(DisplayServer::WindowID p_window_id, bool p_focused) override;
void accessibility_update_set_role(const RID &p_id, DisplayServer::AccessibilityRole p_role) override;
void accessibility_update_set_name(const RID &p_id, const String &p_name) override;
void accessibility_update_set_extra_info(const RID &p_id, const String &p_name_extra_info) override;
void accessibility_update_set_description(const RID &p_id, const String <fim_middle>const RID &p_id<fim_end>
|
;
RID accessibility_create_sub_element(const RID &p_parent_rid, DisplayServer::AccessibilityRole p_role, int p_insert_pos = -1) override;
virtual RID accessibility_create_sub_text_edit_elements(const RID &p_parent_rid, const RID &p_shaped_text, float p_min_height, int p_insert_pos = -1) override;
bool accessibility_has_element(const RID &p_id) const override;
void accessibility_free_element(const RID &p_id) override;
void accessibility_element_set_meta(const RID &p_id, const Variant &p_meta) override;
Variant accessibility_element_get_meta(const RID &p_id) const override;
void accessibility_update_if_active(const Callable &p_callable) override;
void accessibility_update_set_focus(
|
const RID &p_id
|
) override;
RID accessibility_get_window_root(DisplayServer::WindowID p_window_id) const override;
void accessibility_set_window_rect(DisplayServer::WindowID p_window_id, const Rect2 &p_rect_out, const Rect2 &p_rect_in) override;
void accessibility_set_window_focused(DisplayServer::WindowID p_window_id, bool p_focused) override;
void accessibility_update_set_role(const RID &p_id, DisplayServer::AccessibilityRole p_role) override;
void accessibility_update_set_name(const RID &p_id, const String &p_name) override;
void accessibility_update_set_extra_info(const RID &p_id, const String &p_name_extra_info) override;
void accessibility_update_set_description(const RID &p_id, const String
|
ast_based
|
<fim_prefix>
if (existing_idx != -1) {
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", selected_track, dst_time, animation->track_get_key_value(selected_track, existing_idx), animation->track_get_key_transition(selected_track, existing_idx));
}
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect pasted.
int i = 0;
for (const Pair<int, float> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {<fim_suffix>
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->add_do_method(this, "queue_redraw");
undo_redo->add_undo_method(this, "queue_redraw");
undo_redo->commit_action();
}
}
void AnimationBezierTrackEdit::delete_selection() {
if (selection.size()) {
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Animation Delete Keys"));
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {<fim_middle> undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, animation->track_get_key_time(E->get().first, E->get().second), i == 0);
i++;
}<fim_end>
|
if (existing_idx != -1) {
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", selected_track, dst_time, animation->track_get_key_value(selected_track, existing_idx), animation->track_get_key_transition(selected_track, existing_idx));
}
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect pasted.
int i = 0;
for (const Pair<int, float> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
|
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, animation->track_get_key_time(E->get().first, E->get().second), i == 0);
i++;
}
|
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->add_do_method(this, "queue_redraw");
undo_redo->add_undo_method(this, "queue_redraw");
undo_redo->commit_action();
}
}
void AnimationBezierTrackEdit::delete_selection() {
if (selection.size()) {
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Animation Delete Keys"));
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
|
random
|
<fim_prefix>current control vector (but leave allocated for later)
layer_start = -1;
layer_end = -1;
return true;
}
if (n_embd != (int) hparams.n_embd) {
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
return false;
}
if (tensors.empty()) {
if (!init(model)) {
return false;
}
}
layer_start = il_start;
layer_end = il_end;
for (size_t il = 1; il < hparams.n_layer; il++) {
assert(tensors[il] != nullptr);
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
if (off + n_embd <= len) {
<fim_suffix>
}
}
return true;
}
// lora
llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) {
const std::string name(w->name);
const auto pos = ab_map.find(name);
if (pos != ab_map.end()) {
return &pos->second;
}
return nullptr;
}
static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
ggml_context * ctx_init;
gguf_init_params meta_gguf_params = {
/* .no_alloc = */ true,
/* .ctx = */ &ctx_init,
};
gguf_context_ptr ctx_gguf { gguf_init_from_file(pat<fim_middle>ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));<fim_end>
|
current control vector (but leave allocated for later)
layer_start = -1;
layer_end = -1;
return true;
}
if (n_embd != (int) hparams.n_embd) {
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
return false;
}
if (tensors.empty()) {
if (!init(model)) {
return false;
}
}
layer_start = il_start;
layer_end = il_end;
for (size_t il = 1; il < hparams.n_layer; il++) {
assert(tensors[il] != nullptr);
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
if (off + n_embd <= len) {
|
ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
|
}
}
return true;
}
// lora
llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) {
const std::string name(w->name);
const auto pos = ab_map.find(name);
if (pos != ab_map.end()) {
return &pos->second;
}
return nullptr;
}
static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
ggml_context * ctx_init;
gguf_init_params meta_gguf_params = {
/* .no_alloc = */ true,
/* .ctx = */ &ctx_init,
};
gguf_context_ptr ctx_gguf { gguf_init_from_file(pat
|
ast_based
|