| text (stringlengths 435 to 1.65k) | prefix (stringlengths 51 to 700) | middle (stringlengths 10 to 200) | suffix (stringlengths 50 to 700) | type (stringclasses, 2 values) |
|---|---|---|---|---|
<fim_prefix>Flags |= ImGuiBackendFlags_RendererHasTextures; // We can honor ImGuiPlatformIO::Textures[] requests during render.
bd->LastCursor = ALLEGRO_SYSTEM_MOUSE_CURSOR_NONE;
ImGui_ImplAllegro5_SetDisplay(display);
#if ALLEGRO_HAS_CLIPBOARD
ImGuiPlatformIO& platform_io = ImGui::GetPlatformIO();
platform_io.Platform_SetClipboardTextFn = ImGui_ImplAllegro5_SetClipboardText;
platform_io.Platform_GetClipboardTextFn = ImGui_ImplAllegro5_GetClipboardText;
#endif
return true;
}
void ImGui_ImplAllegro5_Shutdown()
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
IM_ASSERT(bd != nullptr && "No platform backend to shutdown, or already shutdown?");
<fim_suffix>
ImGuiPlatformIO& platform_io = ImGui::GetPlatformIO();
ImGui_ImplAllegro5_InvalidateDeviceObjects();
if (bd->VertexDecl)
al_destroy_vertex_decl(bd->VertexDecl);
if (bd->ClipboardTextData)
al_free(bd->ClipboardTextData);
io.BackendPlatformName = io.BackendRendererName = nullptr;
io.BackendPlatformUserData = nullptr;
io.BackendFlags &= ~(ImGuiBackendFlags_HasMouseCursors | ImGuiBackendFlags_RendererHasTextures);
platform_io.ClearRendererHandlers();
platform_io.ClearPlatformHandlers();
IM_DELETE(bd);
}
void ImGui_ImplAllegro5_SetDisplay(ALLEGRO_DISPLAY* display)
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
<fim_middle>ImGuiIO& io = ImGui::GetIO();<fim_end>
|
Flags |= ImGuiBackendFlags_RendererHasTextures; // We can honor ImGuiPlatformIO::Textures[] requests during render.
bd->LastCursor = ALLEGRO_SYSTEM_MOUSE_CURSOR_NONE;
ImGui_ImplAllegro5_SetDisplay(display);
#if ALLEGRO_HAS_CLIPBOARD
ImGuiPlatformIO& platform_io = ImGui::GetPlatformIO();
platform_io.Platform_SetClipboardTextFn = ImGui_ImplAllegro5_SetClipboardText;
platform_io.Platform_GetClipboardTextFn = ImGui_ImplAllegro5_GetClipboardText;
#endif
return true;
}
void ImGui_ImplAllegro5_Shutdown()
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
IM_ASSERT(bd != nullptr && "No platform backend to shutdown, or already shutdown?");
|
ImGuiIO& io = ImGui::GetIO();
|
ImGuiPlatformIO& platform_io = ImGui::GetPlatformIO();
ImGui_ImplAllegro5_InvalidateDeviceObjects();
if (bd->VertexDecl)
al_destroy_vertex_decl(bd->VertexDecl);
if (bd->ClipboardTextData)
al_free(bd->ClipboardTextData);
io.BackendPlatformName = io.BackendRendererName = nullptr;
io.BackendPlatformUserData = nullptr;
io.BackendFlags &= ~(ImGuiBackendFlags_HasMouseCursors | ImGuiBackendFlags_RendererHasTextures);
platform_io.ClearRendererHandlers();
platform_io.ClearPlatformHandlers();
IM_DELETE(bd);
}
void ImGui_ImplAllegro5_SetDisplay(ALLEGRO_DISPLAY* display)
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
|
ast_based
|
<fim_prefix>r_ae);
ERR_FAIL_COND(other_ae->window_id != ae->window_id);
_ensure_node(p_id, ae);
accesskit_node_push_to_radio_group(ae->node, (accesskit_node_id)p_related_id.get_id());
}
void AccessibilityDriverAccessKit::accessibility_update_set_active_descendant(const RID &p_id, const RID &p_other_id) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
AccessibilityElement *other_ae = rid_owner.get_or_null(p_other_id);
ERR_FAIL_NULL(other_ae);
ERR_FAIL_COND(other_ae->window_id != ae->window_id);
_ensure_node(p_id, ae);
<fim_suffix>;
}
void AccessibilityDriverAccessKit::accessibility_update_set_next_on_line(const RID &p_id, const RID &p_other_id) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
AccessibilityElement *other_ae = rid_owner.get_or_null(p_other_id);
ERR_FAIL_NULL(other_ae);
ERR_FAIL_COND(other_ae->window_id != ae->window_id);
_ensure_node(p_id, ae);
accesskit_node_set_next_on_line(ae->node, (accesskit_node_id)p_other_id.get_id());
}
void AccessibilityDriverAccessKit::accessibility_update_set_previous_on_line(const RID &p_id,<fim_middle>accesskit_node_set_active_descendant(ae->node, (accesskit_node_id)p_other_id.get_id())<fim_end>
|
r_ae);
ERR_FAIL_COND(other_ae->window_id != ae->window_id);
_ensure_node(p_id, ae);
accesskit_node_push_to_radio_group(ae->node, (accesskit_node_id)p_related_id.get_id());
}
void AccessibilityDriverAccessKit::accessibility_update_set_active_descendant(const RID &p_id, const RID &p_other_id) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
AccessibilityElement *other_ae = rid_owner.get_or_null(p_other_id);
ERR_FAIL_NULL(other_ae);
ERR_FAIL_COND(other_ae->window_id != ae->window_id);
_ensure_node(p_id, ae);
|
accesskit_node_set_active_descendant(ae->node, (accesskit_node_id)p_other_id.get_id())
|
;
}
void AccessibilityDriverAccessKit::accessibility_update_set_next_on_line(const RID &p_id, const RID &p_other_id) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
AccessibilityElement *other_ae = rid_owner.get_or_null(p_other_id);
ERR_FAIL_NULL(other_ae);
ERR_FAIL_COND(other_ae->window_id != ae->window_id);
_ensure_node(p_id, ae);
accesskit_node_set_next_on_line(ae->node, (accesskit_node_id)p_other_id.get_id());
}
void AccessibilityDriverAccessKit::accessibility_update_set_previous_on_line(const RID &p_id,
|
ast_based
|
<fim_prefix> }
return;
}
if (p_property.name == "animation") {
List<StringName> names;
frames->get_animation_list(&names);
names.sort_custom<StringName::AlphCompare>();
bool current_found = false;
bool is_first_element = true;
for (const StringName &E : names) {
if (!is_first_element) {
p_property.hint_string += ",";
} else {
is_first_element = false;
}
p_property.hint_string += String(E);
if (animation == E) {
current_found = true;
}
}
if (!current_found) {
if (p_property.hint_string.is_empty()) {
p_property.hint_string = String(animation);
} else {
p_property.hint_string = String(animation) + "," + p_property.hint_string;
}<fim_suffix> }
if (p_property.name == "frame") {
if (playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
return;
}
p_property.hint = PROPERTY_HINT_RANGE;
if (frames->has_animation(animation) && frames->get_frame_count(animation) > 0) {
p_property.hint_string = "0," + itos(frames->get_frame_count(animation) - 1) + ",1";
} else {
// Avoid an error, `hint_string` is required for `PROPERTY_HINT_RANGE`.
p_property.hint_string = "0,0,1";
}
p_property.usage |= PROPERTY_USAGE_KEYING_INCREMENTS;
}
}
void AnimatedSprite2D::_notification(int p_what) {
switch (p_what) {
case NOTIFICATION_ACCESSIBILITY_UPDATE: {<fim_middle> }
return;<fim_end>
|
}
return;
}
if (p_property.name == "animation") {
List<StringName> names;
frames->get_animation_list(&names);
names.sort_custom<StringName::AlphCompare>();
bool current_found = false;
bool is_first_element = true;
for (const StringName &E : names) {
if (!is_first_element) {
p_property.hint_string += ",";
} else {
is_first_element = false;
}
p_property.hint_string += String(E);
if (animation == E) {
current_found = true;
}
}
if (!current_found) {
if (p_property.hint_string.is_empty()) {
p_property.hint_string = String(animation);
} else {
p_property.hint_string = String(animation) + "," + p_property.hint_string;
}
|
}
return;
|
}
if (p_property.name == "frame") {
if (playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
return;
}
p_property.hint = PROPERTY_HINT_RANGE;
if (frames->has_animation(animation) && frames->get_frame_count(animation) > 0) {
p_property.hint_string = "0," + itos(frames->get_frame_count(animation) - 1) + ",1";
} else {
// Avoid an error, `hint_string` is required for `PROPERTY_HINT_RANGE`.
p_property.hint_string = "0,0,1";
}
p_property.usage |= PROPERTY_USAGE_KEYING_INCREMENTS;
}
}
void AnimatedSprite2D::_notification(int p_what) {
switch (p_what) {
case NOTIFICATION_ACCESSIBILITY_UPDATE: {
|
random
|
<fim_prefix> llama_seq_id seq_id);
// Copy the state of a single sequence into the specified buffer
LLAMA_API size_t llama_state_seq_get_data(
struct llama_context * ctx,
uint8_t * dst,
size_t size,
llama_seq_id seq_id);
// Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence
// Returns:
// - Positive: Ok
// - Zero: Failed to load
LLAMA_API size_t llama_state_seq_set_data(
struct llama_context * ctx,
const uint8_t * src,
size_t size,<fim_suffix> llama_seq_id seq_id,
const llama_token * tokens,
size_t n_token_count);
LLAMA_API size_t llama_state_seq_load_file(
struct llama_context * ctx,
const char * filepath,
llama_seq_id dest_seq_id,
llama_token * tokens_out,
size_t n_token_capacity,
size_t * n_token_count_out);
// for backwards-compat
#define LLAMA_STATE_SEQ_FLAGS_SWA_ONLY 1
// work only with partial states, such as SWA KV cache or recurrent cache (e.g. Mamba)
#define LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY 1
<fim_middle> llama_seq_id dest_seq_id);
LLAMA_API size_t llama_state_seq_save_file(
struct llama_context * ctx,
const char * filepath,<fim_end>
|
llama_seq_id seq_id);
// Copy the state of a single sequence into the specified buffer
LLAMA_API size_t llama_state_seq_get_data(
struct llama_context * ctx,
uint8_t * dst,
size_t size,
llama_seq_id seq_id);
// Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence
// Returns:
// - Positive: Ok
// - Zero: Failed to load
LLAMA_API size_t llama_state_seq_set_data(
struct llama_context * ctx,
const uint8_t * src,
size_t size,
|
llama_seq_id dest_seq_id);
LLAMA_API size_t llama_state_seq_save_file(
struct llama_context * ctx,
const char * filepath,
|
llama_seq_id seq_id,
const llama_token * tokens,
size_t n_token_count);
LLAMA_API size_t llama_state_seq_load_file(
struct llama_context * ctx,
const char * filepath,
llama_seq_id dest_seq_id,
llama_token * tokens_out,
size_t n_token_capacity,
size_t * n_token_count_out);
// for backwards-compat
#define LLAMA_STATE_SEQ_FLAGS_SWA_ONLY 1
// work only with partial states, such as SWA KV cache or recurrent cache (e.g. Mamba)
#define LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY 1
|
random
|
<fim_prefix>SM_SINGLE_BLOCK);
api_.SetImage(src_pix_);
api_.SetVariable("tessedit_make_boxes_from_boxes", "1");
api_.SetInputName(TestDataNameToPath(filename).c_str());
found = true;
}
return found;
}
// Runs ApplyBoxes (via setting the appropriate variables and Recognize)
// and checks that the output ocr text matches the truth_str, and that
// the boxes match the given box file well enough.
// If line_mode is true, ApplyBoxes is run in line segmentation mode,
// otherwise the input box file is assumed to have character-level boxes.
void VerifyBoxesAndText(const char *imagefile, const char *truth_str, const char *target_box_file,
<fim_suffix>) {
if (!SetImage(imagefile)) {
// eng.traineddata not found or other problem during Init.
GTEST_SKIP();
}
if (line_mode) {
api_.SetVariable("tessedit_resegment_from_line_boxes", "1");
} else {
api_.SetVariable("tessedit_resegment_from_boxes", "1");
}
api_.Recognize(nullptr);
char *ocr_text = api_.GetUTF8Text();
EXPECT_STREQ(truth_str, ocr_text);
delete[] ocr_text;
// Test the boxes by reading the target box file in parallel with the
// bounding boxes in the ocr output.
std::string box_filename = TestDataNameToPath(target_box_file);
FILE *box_file = OpenBoxFile(box_filename.c_str());
ASSERT_TRUE(box_file != nullp<fim_middle>bool line_mode<fim_end>
|
SM_SINGLE_BLOCK);
api_.SetImage(src_pix_);
api_.SetVariable("tessedit_make_boxes_from_boxes", "1");
api_.SetInputName(TestDataNameToPath(filename).c_str());
found = true;
}
return found;
}
// Runs ApplyBoxes (via setting the appropriate variables and Recognize)
// and checks that the output ocr text matches the truth_str, and that
// the boxes match the given box file well enough.
// If line_mode is true, ApplyBoxes is run in line segmentation mode,
// otherwise the input box file is assumed to have character-level boxes.
void VerifyBoxesAndText(const char *imagefile, const char *truth_str, const char *target_box_file,
|
bool line_mode
|
) {
if (!SetImage(imagefile)) {
// eng.traineddata not found or other problem during Init.
GTEST_SKIP();
}
if (line_mode) {
api_.SetVariable("tessedit_resegment_from_line_boxes", "1");
} else {
api_.SetVariable("tessedit_resegment_from_boxes", "1");
}
api_.Recognize(nullptr);
char *ocr_text = api_.GetUTF8Text();
EXPECT_STREQ(truth_str, ocr_text);
delete[] ocr_text;
// Test the boxes by reading the target box file in parallel with the
// bounding boxes in the ocr output.
std::string box_filename = TestDataNameToPath(target_box_file);
FILE *box_file = OpenBoxFile(box_filename.c_str());
ASSERT_TRUE(box_file != nullp
|
ast_based
|
<fim_prefix>t_time_scale(double p_scale);
double get_time_scale() const;
double get_unfrozen_time_scale() const;
void set_print_to_stdout(bool p_enabled);
bool is_printing_to_stdout() const;
void set_print_error_messages(bool p_enabled);
bool is_printing_error_messages() const;
void print_header(const String &p_string) const;
void print_header_rich(const String &p_string) const;
void set_frame_delay(uint32_t p_msec);
uint32_t get_frame_delay() const;
void add_singleton(const Singleton &p_singleton);
void get_singletons(List<Singleton> *p_singletons);
bool has_singleton(const StringName &p_name) const;
Object *get_singleton_object(const StringName &p_name) const;
void remove_singleton(<fim_suffix>);
bool is_singleton_user_created(const StringName &p_name) const;
bool is_singleton_editor_only(const StringName &p_name) const;
#ifdef TOOLS_ENABLED
_FORCE_INLINE_ void set_editor_hint(bool p_enabled) { editor_hint = p_enabled; }
_FORCE_INLINE_ bool is_editor_hint() const { return editor_hint; }
_FORCE_INLINE_ void set_project_manager_hint(bool p_enabled) { project_manager_hint = p_enabled; }
_FORCE_INLINE_ bool is_project_manager_hint() const { return project_manager_hint; }
_FORCE_INLINE_ void set_extension_reloading_enabled(bool p_enabled) { extension_reloading = p_enabled; }
_FORCE_INLINE_ bool is_extension_reloading_enabled() const { return extension_reloading; }
_FORCE_I<fim_middle>const StringName &p_name<fim_end>
|
t_time_scale(double p_scale);
double get_time_scale() const;
double get_unfrozen_time_scale() const;
void set_print_to_stdout(bool p_enabled);
bool is_printing_to_stdout() const;
void set_print_error_messages(bool p_enabled);
bool is_printing_error_messages() const;
void print_header(const String &p_string) const;
void print_header_rich(const String &p_string) const;
void set_frame_delay(uint32_t p_msec);
uint32_t get_frame_delay() const;
void add_singleton(const Singleton &p_singleton);
void get_singletons(List<Singleton> *p_singletons);
bool has_singleton(const StringName &p_name) const;
Object *get_singleton_object(const StringName &p_name) const;
void remove_singleton(
|
const StringName &p_name
|
);
bool is_singleton_user_created(const StringName &p_name) const;
bool is_singleton_editor_only(const StringName &p_name) const;
#ifdef TOOLS_ENABLED
_FORCE_INLINE_ void set_editor_hint(bool p_enabled) { editor_hint = p_enabled; }
_FORCE_INLINE_ bool is_editor_hint() const { return editor_hint; }
_FORCE_INLINE_ void set_project_manager_hint(bool p_enabled) { project_manager_hint = p_enabled; }
_FORCE_INLINE_ bool is_project_manager_hint() const { return project_manager_hint; }
_FORCE_INLINE_ void set_extension_reloading_enabled(bool p_enabled) { extension_reloading = p_enabled; }
_FORCE_INLINE_ bool is_extension_reloading_enabled() const { return extension_reloading; }
_FORCE_I
|
ast_based
|
<fim_prefix>(model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) {
throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
}
if (w.a->ne[1] != w.b->ne[0]) {
throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)");
}
}
// save tensor to adapter
ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a);
ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b);
ggml_set_name(tensor_a, w.a->name);
ggml_set_name(tensor_b, w.b->name);
adapter.ab_map[name] = <fim_suffix>;
}
// allocate tensors / buffers and zero
{
adapter.ctxs.reserve(ctx_map.size());
adapter.bufs.reserve(ctx_map.size());
for (auto & it : ctx_map) {
ggml_backend_buffer_type_t buft = it.first;
ggml_context * ctx_dev = it.second;
ggml_backend_buffer_ptr buf { ggml_backend_alloc_ctx_tensors_from_buft(ctx_dev, buft) };
if (!buf) {
throw std::runtime_error("failed to allocate buffer for lora adapter\n");
}
LLAMA_LOG_INFO("%s: %10s LoRA buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get())/1024.0/1024.0);
<fim_middle>llama_adapter_lora_weight(tensor_a, tensor_b)<fim_end>
|
(model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) {
throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
}
if (w.a->ne[1] != w.b->ne[0]) {
throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)");
}
}
// save tensor to adapter
ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a);
ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b);
ggml_set_name(tensor_a, w.a->name);
ggml_set_name(tensor_b, w.b->name);
adapter.ab_map[name] =
|
llama_adapter_lora_weight(tensor_a, tensor_b)
|
;
}
// allocate tensors / buffers and zero
{
adapter.ctxs.reserve(ctx_map.size());
adapter.bufs.reserve(ctx_map.size());
for (auto & it : ctx_map) {
ggml_backend_buffer_type_t buft = it.first;
ggml_context * ctx_dev = it.second;
ggml_backend_buffer_ptr buf { ggml_backend_alloc_ctx_tensors_from_buft(ctx_dev, buft) };
if (!buf) {
throw std::runtime_error("failed to allocate buffer for lora adapter\n");
}
LLAMA_LOG_INFO("%s: %10s LoRA buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get())/1024.0/1024.0);
|
ast_based
|
<fim_prefix>ion(),
kMinCredibleResolution, kMaxCredibleResolution);
if (estimated_res != thresholder_->GetScaledEstimatedResolution()) {
tprintf(
"Estimated internal resolution %d out of range! "
"Corrected to %d.\n",
thresholder_->GetScaledEstimatedResolution(), estimated_res);
}
tesseract_->set_source_resolution(estimated_res);
return true;
}
/** Find lines from the image making the BLOCK_LIST. */
int TessBaseAPI::FindLines() {
if (thresholder_ == nullptr || thresholder_->IsEmpty()) {
tprintf("Please call SetImage before attempting recognition.\n");
return -1;
}
if (recognition_done_) {
ClearResults();
}
if (!<fim_suffix>) {
return 0;
}
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
#ifndef DISABLED_LEGACY_ENGINE
tesseract_->InitAdaptiveClassifier(nullptr);
#endif
}
if (tesseract_->pix_binary() == nullptr && !Threshold(&tesseract_->mutable_pix_binary()->pix_)) {
return -1;
}
tesseract_->PrepareForPageseg();
#ifndef DISABLED_LEGACY_ENGINE
if (tesseract_->textord_equation_detect) {
if (equ_detect_ == nullptr && !datapath_.empty()) {
equ_detect_ = new EquationDetect(datapath_.c_str(), nullptr);
}
if (equ_detect_ == nullptr) {
tprintf("Warning: Could not set equation detector\n");
} else {
tesseract_->SetEquationDetect(equ_detect_);
<fim_middle>block_list_->empty()<fim_end>
|
ion(),
kMinCredibleResolution, kMaxCredibleResolution);
if (estimated_res != thresholder_->GetScaledEstimatedResolution()) {
tprintf(
"Estimated internal resolution %d out of range! "
"Corrected to %d.\n",
thresholder_->GetScaledEstimatedResolution(), estimated_res);
}
tesseract_->set_source_resolution(estimated_res);
return true;
}
/** Find lines from the image making the BLOCK_LIST. */
int TessBaseAPI::FindLines() {
if (thresholder_ == nullptr || thresholder_->IsEmpty()) {
tprintf("Please call SetImage before attempting recognition.\n");
return -1;
}
if (recognition_done_) {
ClearResults();
}
if (!
|
block_list_->empty()
|
) {
return 0;
}
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
#ifndef DISABLED_LEGACY_ENGINE
tesseract_->InitAdaptiveClassifier(nullptr);
#endif
}
if (tesseract_->pix_binary() == nullptr && !Threshold(&tesseract_->mutable_pix_binary()->pix_)) {
return -1;
}
tesseract_->PrepareForPageseg();
#ifndef DISABLED_LEGACY_ENGINE
if (tesseract_->textord_equation_detect) {
if (equ_detect_ == nullptr && !datapath_.empty()) {
equ_detect_ = new EquationDetect(datapath_.c_str(), nullptr);
}
if (equ_detect_ == nullptr) {
tprintf("Warning: Could not set equation detector\n");
} else {
tesseract_->SetEquationDetect(equ_detect_);
|
ast_based
|
<fim_prefix>nd);
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
Array key = animation->track_get_key_value(E->get().first, E->get().second);
real_t h = key[0];
h += -scaling_selection_offset.y + (h - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
key[0] = h;
undo_redo->add_do_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
E->get().first,
newpos,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
animation->bezier_track_get_key_handle_mode(E->get().first, E->get().second));
}
// 4 - (undo) Remove inserted keys.
for (<fim_suffix> E; E = E->prev()) {
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second);
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, newpos);
}
// 5 - (undo) Reinsert keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
Array key = animation->track_get_key_value(E->get().first, E->get().second);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,<fim_middle>SelectionSet::Element *E = selection.back();<fim_end>
|
nd);
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
Array key = animation->track_get_key_value(E->get().first, E->get().second);
real_t h = key[0];
h += -scaling_selection_offset.y + (h - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
key[0] = h;
undo_redo->add_do_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
E->get().first,
newpos,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
animation->bezier_track_get_key_handle_mode(E->get().first, E->get().second));
}
// 4 - (undo) Remove inserted keys.
for (
|
SelectionSet::Element *E = selection.back();
|
E; E = E->prev()) {
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second);
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, newpos);
}
// 5 - (undo) Reinsert keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
Array key = animation->track_get_key_value(E->get().first, E->get().second);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
|
ast_based
|
<fim_prefix>print_build_info();
llama_backend_init();
llama_numa_init(params.numa);
// load the model to get hparams
common_init_result llama_init = common_init_from_params(params);
llama_model * model = llama_init.model.get();
llama_context * ctx = llama_init.context.get();
// int n_ctx = llama_n_ctx(ctx);
int n_layers = llama_model_n_layer(model);
int n_embd = llama_model_n_embd(model);
// get model hint param (a.k.a model arch name)
char model_hint[128];
llama_model_meta_val_str(model, "general.architecture", model_hint, 128);
// init train_context
train_context ctx_train(n_embd, n_layers);
// load and prepare entries for training
<fim_suffix>;
// we have to pretokenize everything because otherwise we don't know how much overhead to allocate ctx_diffs_wrapped
std::vector<tokenized_prompt> tokenized_prompts;
size_t n_total_tokens = 0;
for (size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
tokenized_prompt t(ctx, ctx_train.positive_entries[i], ctx_train.negative_entries[i]);
n_total_tokens += 2 * t.max_seq_len;
tokenized_prompts.push_back(std::move(t));
}
std::cout << "n_total_tokens: " << n_total_tokens << std::endl;
for(size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
bool success = false;
tokenized_prompt t = tokenized_prompts[i];
<fim_middle>prepare_entries(params, ctx_train)<fim_end>
|
print_build_info();
llama_backend_init();
llama_numa_init(params.numa);
// load the model to get hparams
common_init_result llama_init = common_init_from_params(params);
llama_model * model = llama_init.model.get();
llama_context * ctx = llama_init.context.get();
// int n_ctx = llama_n_ctx(ctx);
int n_layers = llama_model_n_layer(model);
int n_embd = llama_model_n_embd(model);
// get model hint param (a.k.a model arch name)
char model_hint[128];
llama_model_meta_val_str(model, "general.architecture", model_hint, 128);
// init train_context
train_context ctx_train(n_embd, n_layers);
// load and prepare entries for training
|
prepare_entries(params, ctx_train)
|
;
// we have to pretokenize everything because otherwise we don't know how much overhead to allocate ctx_diffs_wrapped
std::vector<tokenized_prompt> tokenized_prompts;
size_t n_total_tokens = 0;
for (size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
tokenized_prompt t(ctx, ctx_train.positive_entries[i], ctx_train.negative_entries[i]);
n_total_tokens += 2 * t.max_seq_len;
tokenized_prompts.push_back(std::move(t));
}
std::cout << "n_total_tokens: " << n_total_tokens << std::endl;
for(size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
bool success = false;
tokenized_prompt t = tokenized_prompts[i];
|
ast_based
|
<fim_prefix> src[0], len, \
src[1], len, \
src[2], len, \
src[3], len, \
dst, len), \
CV_HAL_ERROR_OK : \
CV_HAL_ERROR_NOT_IMPLEMENTED \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
TegraRowOp_Invoker(combine2, combine2, 2, 1, 0, RANGE_DATA(ST, src1_data, sizeof(ST)), range.end-range.start,
RANGE_DATA(ST, src2_data, sizeof(ST)), range.end-range.start,
RANGE_DATA(DT, dst1_data, 2*sizeof(DT)), range.end-range.start)
TegraRowOp_Invoker(combine3, combine3, 3, 1, 0, <fim_suffix>, range.end-range.start,
RANGE_DATA(ST, src2_data, sizeof(ST)), range.end-range.start,
RANGE_DATA(ST, src3_data, sizeof(ST)), range.end-range.start,
RANGE_DATA(DT, dst1_data, 3*sizeof(DT)), range.end-range.start)
TegraRowOp_Invoker(combine4, combine4, 4, 1, 0, RANGE_DATA(ST, src1_data, sizeof(ST)), range.end-range.start,
RANGE_DATA(ST, src2_data, sizeof(ST)), range.end-range.start,
RANGE_DATA(ST, src3_data, sizeof(ST)), range.end-range.start,
<fim_middle>RANGE_DATA(ST, src1_data, sizeof(ST))<fim_end>
|
src[0], len, \
src[1], len, \
src[2], len, \
src[3], len, \
dst, len), \
CV_HAL_ERROR_OK : \
CV_HAL_ERROR_NOT_IMPLEMENTED \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
TegraRowOp_Invoker(combine2, combine2, 2, 1, 0, RANGE_DATA(ST, src1_data, sizeof(ST)), range.end-range.start,
RANGE_DATA(ST, src2_data, sizeof(ST)), range.end-range.start,
RANGE_DATA(DT, dst1_data, 2*sizeof(DT)), range.end-range.start)
TegraRowOp_Invoker(combine3, combine3, 3, 1, 0,
|
RANGE_DATA(ST, src1_data, sizeof(ST))
|
, range.end-range.start,
RANGE_DATA(ST, src2_data, sizeof(ST)), range.end-range.start,
RANGE_DATA(ST, src3_data, sizeof(ST)), range.end-range.start,
RANGE_DATA(DT, dst1_data, 3*sizeof(DT)), range.end-range.start)
TegraRowOp_Invoker(combine4, combine4, 4, 1, 0, RANGE_DATA(ST, src1_data, sizeof(ST)), range.end-range.start,
RANGE_DATA(ST, src2_data, sizeof(ST)), range.end-range.start,
RANGE_DATA(ST, src3_data, sizeof(ST)), range.end-range.start,
|
ast_based
|
<fim_prefix>
part_dict["copyright"] = array_from_info_count(cp_part.copyright_statements, cp_part.copyright_count);
part_dict["license"] = String::utf8(cp_part.license);
parts.push_back(part_dict);
}
component_dict["parts"] = parts;
components.push_back(component_dict);
}
return components;
}
Dictionary Engine::get_donor_info() const {
Dictionary donors;
donors["patrons"] = array_from_info(DONORS_PATRONS);
donors["platinum_sponsors"] = array_from_info(DONORS_SPONSORS_PLATINUM);
donors["gold_sponsors"] = array_from_info(DONORS_SPONSORS_GOLD);
donors["silver_sponsors"] = array_from_info(DONORS_SPONSORS_SILVER);
donors["diamond_members"] = array_from_info(DONORS_MEMBERS_DIAMOND);
<fim_suffix>
donors["platinum_members"] = array_from_info(DONORS_MEMBERS_PLATINUM);
donors["gold_members"] = array_from_info(DONORS_MEMBERS_GOLD);
return donors;
}
Dictionary Engine::get_license_info() const {
Dictionary licenses;
for (int i = 0; i < LICENSE_COUNT; i++) {
licenses[LICENSE_NAMES[i]] = LICENSE_BODIES[i];
}
return licenses;
}
String Engine::get_license_text() const {
return String(GODOT_LICENSE_TEXT);
}
String Engine::get_architecture_name() const {
#if defined(__x86_64) || defined(__x86_64__) || defined(__amd64__) || defined(_M_X64)
return "x86_64";
#elif defined(__i386) || defined(__i386__) || defined(_M_IX86)
return "x86_32";
#elif defined(__aarch64__) || defined(_M_ARM64<fim_middle>donors["titanium_members"] = array_from_info(DONORS_MEMBERS_TITANIUM);<fim_end>
|
part_dict["copyright"] = array_from_info_count(cp_part.copyright_statements, cp_part.copyright_count);
part_dict["license"] = String::utf8(cp_part.license);
parts.push_back(part_dict);
}
component_dict["parts"] = parts;
components.push_back(component_dict);
}
return components;
}
Dictionary Engine::get_donor_info() const {
Dictionary donors;
donors["patrons"] = array_from_info(DONORS_PATRONS);
donors["platinum_sponsors"] = array_from_info(DONORS_SPONSORS_PLATINUM);
donors["gold_sponsors"] = array_from_info(DONORS_SPONSORS_GOLD);
donors["silver_sponsors"] = array_from_info(DONORS_SPONSORS_SILVER);
donors["diamond_members"] = array_from_info(DONORS_MEMBERS_DIAMOND);
|
donors["titanium_members"] = array_from_info(DONORS_MEMBERS_TITANIUM);
|
donors["platinum_members"] = array_from_info(DONORS_MEMBERS_PLATINUM);
donors["gold_members"] = array_from_info(DONORS_MEMBERS_GOLD);
return donors;
}
Dictionary Engine::get_license_info() const {
Dictionary licenses;
for (int i = 0; i < LICENSE_COUNT; i++) {
licenses[LICENSE_NAMES[i]] = LICENSE_BODIES[i];
}
return licenses;
}
String Engine::get_license_text() const {
return String(GODOT_LICENSE_TEXT);
}
String Engine::get_architecture_name() const {
#if defined(__x86_64) || defined(__x86_64__) || defined(__amd64__) || defined(_M_X64)
return "x86_64";
#elif defined(__i386) || defined(__i386__) || defined(_M_IX86)
return "x86_32";
#elif defined(__aarch64__) || defined(_M_ARM64
|
ast_based
|
<fim_prefix>e <tesseract/baseapi.h>
#include <tesseract/resultiterator.h>
#include <string>
#include "boxread.h"
#include "rect.h"
#include "include_gunit.h"
namespace tesseract {
const char *kTruthTextWords = "To simple burn running of goods lately.\n";
const char *kTruthTextLine = "Tosimpleburnrunningofgoodslately.\n";
// The fixture for testing Tesseract.
class ApplyBoxTest : public testing::Test {
protected:
std::string TestDataNameToPath(const std::string &name) {
return file::JoinPath(TESTING_DIR, name);
}
std::string TessdataPath() {
return TESSDATA_DIR;
}
ApplyBoxTest() {
src_pix_ = nullptr;
}
~ApplyBoxTest() override {
src_pix_.destroy();
}
bool SetImage(<fim_suffix>) {
bool found = false;
src_pix_.destroy();
src_pix_ = pixRead(TestDataNameToPath(filename).c_str());
if (api_.Init(TessdataPath().c_str(), "eng", tesseract::OEM_TESSERACT_ONLY) != -1) {
api_.SetPageSegMode(tesseract::PSM_SINGLE_BLOCK);
api_.SetImage(src_pix_);
api_.SetVariable("tessedit_make_boxes_from_boxes", "1");
api_.SetInputName(TestDataNameToPath(filename).c_str());
found = true;
}
return found;
}
// Runs ApplyBoxes (via setting the appropriate variables and Recognize)
// and checks that the output ocr text matches the truth_str, and that
// the boxes match the given box file well enough.
// If line_mode is true, ApplyBo<fim_middle>const char *filename<fim_end>
|
e <tesseract/baseapi.h>
#include <tesseract/resultiterator.h>
#include <string>
#include "boxread.h"
#include "rect.h"
#include "include_gunit.h"
namespace tesseract {
const char *kTruthTextWords = "To simple burn running of goods lately.\n";
const char *kTruthTextLine = "Tosimpleburnrunningofgoodslately.\n";
// The fixture for testing Tesseract.
class ApplyBoxTest : public testing::Test {
protected:
std::string TestDataNameToPath(const std::string &name) {
return file::JoinPath(TESTING_DIR, name);
}
std::string TessdataPath() {
return TESSDATA_DIR;
}
ApplyBoxTest() {
src_pix_ = nullptr;
}
~ApplyBoxTest() override {
src_pix_.destroy();
}
bool SetImage(
|
const char *filename
|
) {
bool found = false;
src_pix_.destroy();
src_pix_ = pixRead(TestDataNameToPath(filename).c_str());
if (api_.Init(TessdataPath().c_str(), "eng", tesseract::OEM_TESSERACT_ONLY) != -1) {
api_.SetPageSegMode(tesseract::PSM_SINGLE_BLOCK);
api_.SetImage(src_pix_);
api_.SetVariable("tessedit_make_boxes_from_boxes", "1");
api_.SetInputName(TestDataNameToPath(filename).c_str());
found = true;
}
return found;
}
// Runs ApplyBoxes (via setting the appropriate variables and Recognize)
// and checks that the output ocr text matches the truth_str, and that
// the boxes match the given box file well enough.
// If line_mode is true, ApplyBo
|
ast_based
|
<fim_prefix> if (!params_parse(argc, argv, ¶ms)) {
return 1;
}
Config config;
TransformerWeights weights = {};
{
LOG_INF("%s: Loading llama2c model from %s\n", __func__, params.fn_llama2c_model);
FILE * file = fopen(params.fn_llama2c_model, "rb");
if (!file) {
LOG_ERR("%s: Unable to open the checkpoint file %s!\n", __func__, params.fn_llama2c_model);
return 1;
}
// read in the config header
if (fread(&config, sizeof(Config), 1, file) != 1) {
LOG_ERR("%s: Unable to read llama2c config from %s!\n",__func__,params.fn_llama2c_model);
return 1;<fim_suffix> alloc_weights(&weights, &config, shared_weights);
if (checkpoint_init_weights(&weights, &config, file, shared_weights)) {
LOG_ERR("%s: Unable to initialize transformer weights from %s!",__func__,params.fn_llama2c_model);
return 1;
}
fclose(file);
}
struct my_llama_vocab vocab;
load_vocab(params.fn_vocab_model, &config, &vocab);
struct my_llama_model model;
model.hparams.n_vocab = config.vocab_size; //llama_vocab_n_vocab(lctx);
model.hparams.n_ctx = params.n_ctx;
model.hparams.n_embd = config.dim; //params.n_embd;
model.hparams.n_ff = config.hidden_dim;<fim_middle> }
auto shared_weights = config.vocab_size > 0;
config.vocab_size = abs(config.vocab_size);
// read in the Transformer weights<fim_end>
|
    if (!params_parse(argc, argv, &params)) {
return 1;
}
Config config;
TransformerWeights weights = {};
{
LOG_INF("%s: Loading llama2c model from %s\n", __func__, params.fn_llama2c_model);
FILE * file = fopen(params.fn_llama2c_model, "rb");
if (!file) {
LOG_ERR("%s: Unable to open the checkpoint file %s!\n", __func__, params.fn_llama2c_model);
return 1;
}
// read in the config header
if (fread(&config, sizeof(Config), 1, file) != 1) {
LOG_ERR("%s: Unable to read llama2c config from %s!\n",__func__,params.fn_llama2c_model);
return 1;
|
}
auto shared_weights = config.vocab_size > 0;
config.vocab_size = abs(config.vocab_size);
// read in the Transformer weights
|
alloc_weights(&weights, &config, shared_weights);
if (checkpoint_init_weights(&weights, &config, file, shared_weights)) {
LOG_ERR("%s: Unable to initialize transformer weights from %s!",__func__,params.fn_llama2c_model);
return 1;
}
fclose(file);
}
struct my_llama_vocab vocab;
load_vocab(params.fn_vocab_model, &config, &vocab);
struct my_llama_model model;
model.hparams.n_vocab = config.vocab_size; //llama_vocab_n_vocab(lctx);
model.hparams.n_ctx = params.n_ctx;
model.hparams.n_embd = config.dim; //params.n_embd;
model.hparams.n_ff = config.hidden_dim;
|
random
|
<fim_prefix> tex->SetTexID(ImTextureID_Invalid);
tex->SetStatus(ImTextureStatus_Destroyed);
}
}
void ImGui_ImplAllegro5_InvalidateDeviceObjects()
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
// Destroy all textures
for (ImTextureData* tex : ImGui::GetPlatformIO().Textures)
if (tex->RefCount == 1)
{
tex->SetStatus(ImTextureStatus_WantDestroy);
ImGui_ImplAllegro5_UpdateTexture(tex);
}
// Destroy mouse cursor
if (bd->MouseCursorInvisible)
{
al_destroy_mouse_cursor(bd->MouseCursorInvisible);
bd->MouseCursorInvisible = nullptr;
}<fim_suffix> ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
if (bd->ClipboardTextData)
al_free(bd->ClipboardTextData);
bd->ClipboardTextData = al_get_clipboard_text(bd->Display);
return bd->ClipboardTextData;
}
static void ImGui_ImplAllegro5_SetClipboardText(ImGuiContext*, const char* text)
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
al_set_clipboard_text(bd->Display, text);
}
#endif
// Not static to allow third-party code to use that if they want to (but undocumented)
ImGuiKey ImGui_ImplAllegro5_KeyCodeToImGuiKey(int key_code);
ImGuiKey ImGui_ImplAllegro5_KeyCodeToImGuiKey(int key_code)
{
switch (key_code)
{<fim_middle>}
#if ALLEGRO_HAS_CLIPBOARD
static const char* ImGui_ImplAllegro5_GetClipboardText(ImGuiContext*)
{<fim_end>
|
tex->SetTexID(ImTextureID_Invalid);
tex->SetStatus(ImTextureStatus_Destroyed);
}
}
void ImGui_ImplAllegro5_InvalidateDeviceObjects()
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
// Destroy all textures
for (ImTextureData* tex : ImGui::GetPlatformIO().Textures)
if (tex->RefCount == 1)
{
tex->SetStatus(ImTextureStatus_WantDestroy);
ImGui_ImplAllegro5_UpdateTexture(tex);
}
// Destroy mouse cursor
if (bd->MouseCursorInvisible)
{
al_destroy_mouse_cursor(bd->MouseCursorInvisible);
bd->MouseCursorInvisible = nullptr;
}
|
}
#if ALLEGRO_HAS_CLIPBOARD
static const char* ImGui_ImplAllegro5_GetClipboardText(ImGuiContext*)
{
|
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
if (bd->ClipboardTextData)
al_free(bd->ClipboardTextData);
bd->ClipboardTextData = al_get_clipboard_text(bd->Display);
return bd->ClipboardTextData;
}
static void ImGui_ImplAllegro5_SetClipboardText(ImGuiContext*, const char* text)
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
al_set_clipboard_text(bd->Display, text);
}
#endif
// Not static to allow third-party code to use that if they want to (but undocumented)
ImGuiKey ImGui_ImplAllegro5_KeyCodeToImGuiKey(int key_code);
ImGuiKey ImGui_ImplAllegro5_KeyCodeToImGuiKey(int key_code)
{
switch (key_code)
{
|
random
|
<fim_prefix>iff_filtered = filter_nonzero_rows(v_pos[il]);
v_diff_filtered.push_back(diff_filtered);
}
    return v_diff_filtered; // for convenience, we return the result std::vector
}
// delete zero rows from a given 2D tensor
struct ggml_tensor * filter_nonzero_rows(struct ggml_tensor * a) {
//printf("filter_nonzero_rows\n");
auto is_row_all_zeros = [](struct ggml_tensor * t, int row, float eps) -> bool {
// check if given row containing all zero elements
int n_cols = t->ne[0]; // hint: should be equal to n_embd
for (int col = 0; col < n_cols; ++col) {
if (ggml_get_f32_nd(t, col, row, 0, 0) > eps) <fim_suffix>
}
return true;
};
std::vector<int> rows_to_copy; // the idx of non-zero cols (to be copied to row of diff_filtered)
for (int i_row = 0; i_row < a->ne[1]; i_row++) {
if (!is_row_all_zeros(a, i_row, 1e-6)) {
rows_to_copy.push_back(i_row);
}
}
// get "n_nonzero_rows" for the output "diff_filtered"
int n_nonzero_rows = rows_to_copy.size();
//printf("n_nonzero_rows: %d\n", n_nonzero_rows);
int n_embd = a->ne[0];
GGML_ASSERT(n_nonzero_rows > 0);
// diff_filtered: [n_embd, n_nonzero_rows]
struct ggml_tensor * diff_filtered = ggml_new_tensor_2d(
<fim_middle>{
return false;
}<fim_end>
|
iff_filtered = filter_nonzero_rows(v_pos[il]);
v_diff_filtered.push_back(diff_filtered);
}
    return v_diff_filtered; // for convenience, we return the result std::vector
}
// delete zero rows from a given 2D tensor
struct ggml_tensor * filter_nonzero_rows(struct ggml_tensor * a) {
//printf("filter_nonzero_rows\n");
auto is_row_all_zeros = [](struct ggml_tensor * t, int row, float eps) -> bool {
// check if given row containing all zero elements
int n_cols = t->ne[0]; // hint: should be equal to n_embd
for (int col = 0; col < n_cols; ++col) {
if (ggml_get_f32_nd(t, col, row, 0, 0) > eps)
|
{
return false;
}
|
}
return true;
};
std::vector<int> rows_to_copy; // the idx of non-zero cols (to be copied to row of diff_filtered)
for (int i_row = 0; i_row < a->ne[1]; i_row++) {
if (!is_row_all_zeros(a, i_row, 1e-6)) {
rows_to_copy.push_back(i_row);
}
}
// get "n_nonzero_rows" for the output "diff_filtered"
int n_nonzero_rows = rows_to_copy.size();
//printf("n_nonzero_rows: %d\n", n_nonzero_rows);
int n_embd = a->ne[0];
GGML_ASSERT(n_nonzero_rows > 0);
// diff_filtered: [n_embd, n_nonzero_rows]
struct ggml_tensor * diff_filtered = ggml_new_tensor_2d(
|
ast_based
|
<fim_prefix> LOG_INF("%f\n", w->wv[0]);
LOG_INF("%f\n", w->wo[0]);
LOG_INF("%f\n", w->w1[0]);
LOG_INF("%f\n", w->w2[0]);
LOG_INF("%f\n", w->w3[0]);
LOG_INF("%f\n", w->rms_att_weight[0]);
if (!w->wcls.empty()) LOG_INF("%f\n", w->wcls[0]);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////// ggml structs and functions required to load models, configs and save the model.
struct my_llama_vocab {
using id = int32_t;
using token = std::string;
using ttype = llama_token_type;
struct token_data {
token text;
float score;<fim_suffix> std::vector<token_data> id_to_token;
};
struct my_llama_hparams {
uint32_t n_vocab = 32000;
uint32_t n_ctx = 512; // this is provided as user input?
uint32_t n_embd = 4096;
uint32_t n_ff = 11008;
uint32_t n_mult = 4;
uint32_t n_head = 32;
uint32_t n_head_kv = 32;
uint32_t n_layer = 32;
uint32_t n_rot = 64;
bool operator!=(const my_llama_hparams& other) const {
return memcmp(this, &other, sizeof(my_llama_hparams));
}
};
struct my_llama_layer {
// normalization
struct ggml_tensor * attention_norm;
// attention
struct ggml_tensor * wq;
struct ggml_tensor * wk;<fim_middle> ttype type;
};
std::unordered_map<token, id> token_to_id;<fim_end>
|
LOG_INF("%f\n", w->wv[0]);
LOG_INF("%f\n", w->wo[0]);
LOG_INF("%f\n", w->w1[0]);
LOG_INF("%f\n", w->w2[0]);
LOG_INF("%f\n", w->w3[0]);
LOG_INF("%f\n", w->rms_att_weight[0]);
if (!w->wcls.empty()) LOG_INF("%f\n", w->wcls[0]);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////// ggml structs and functions required to load models, configs and save the model.
struct my_llama_vocab {
using id = int32_t;
using token = std::string;
using ttype = llama_token_type;
struct token_data {
token text;
float score;
|
ttype type;
};
std::unordered_map<token, id> token_to_id;
|
std::vector<token_data> id_to_token;
};
struct my_llama_hparams {
uint32_t n_vocab = 32000;
uint32_t n_ctx = 512; // this is provided as user input?
uint32_t n_embd = 4096;
uint32_t n_ff = 11008;
uint32_t n_mult = 4;
uint32_t n_head = 32;
uint32_t n_head_kv = 32;
uint32_t n_layer = 32;
uint32_t n_rot = 64;
bool operator!=(const my_llama_hparams& other) const {
return memcmp(this, &other, sizeof(my_llama_hparams));
}
};
struct my_llama_layer {
// normalization
struct ggml_tensor * attention_norm;
// attention
struct ggml_tensor * wq;
struct ggml_tensor * wk;
|
random
|
<fim_prefix>
virtual int32_t contextLength() const = 0;
virtual auto specialTokens() -> std::unordered_map<std::string, std::string> const = 0;
protected:
// These are pure virtual because subclasses need to implement as the default implementation of
// 'prompt' above calls these functions
virtual std::vector<Token> tokenize(std::string_view str) const = 0;
virtual bool isSpecialToken(Token id) const = 0;
virtual std::string tokenToString(Token id) const = 0;
virtual void initSampler(const PromptContext &ctx) = 0;
virtual Token sampleToken() const = 0;
virtual bool evalTokens(int32_t nPast, std::span<const Token> tokens) const = 0;
virtual void shiftContext(<fim_suffix>, int32_t *nPast) = 0;
virtual int32_t inputLength() const = 0;
virtual int32_t computeModelInputPosition(std::span<const Token> input) const = 0;
virtual void setModelInputPosition(int32_t pos) = 0;
virtual void appendInputToken(Token tok) = 0;
virtual std::span<const Token> inputTokens() const = 0;
virtual const std::vector<Token> &endTokens() const = 0;
virtual bool shouldAddBOS() const = 0;
virtual int32_t maxContextLength(std::string const &modelPath) const
{
(void)modelPath;
return -1;
}
virtual int32_t layerCount(std::string const &modelPath) const
{
(void)modelPath;
return -1;
}
virtual auto ch<fim_middle>const PromptContext &promptCtx<fim_end>
|
virtual int32_t contextLength() const = 0;
virtual auto specialTokens() -> std::unordered_map<std::string, std::string> const = 0;
protected:
// These are pure virtual because subclasses need to implement as the default implementation of
// 'prompt' above calls these functions
virtual std::vector<Token> tokenize(std::string_view str) const = 0;
virtual bool isSpecialToken(Token id) const = 0;
virtual std::string tokenToString(Token id) const = 0;
virtual void initSampler(const PromptContext &ctx) = 0;
virtual Token sampleToken() const = 0;
virtual bool evalTokens(int32_t nPast, std::span<const Token> tokens) const = 0;
virtual void shiftContext(
|
const PromptContext &promptCtx
|
, int32_t *nPast) = 0;
virtual int32_t inputLength() const = 0;
virtual int32_t computeModelInputPosition(std::span<const Token> input) const = 0;
virtual void setModelInputPosition(int32_t pos) = 0;
virtual void appendInputToken(Token tok) = 0;
virtual std::span<const Token> inputTokens() const = 0;
virtual const std::vector<Token> &endTokens() const = 0;
virtual bool shouldAddBOS() const = 0;
virtual int32_t maxContextLength(std::string const &modelPath) const
{
(void)modelPath;
return -1;
}
virtual int32_t layerCount(std::string const &modelPath) const
{
(void)modelPath;
return -1;
}
virtual auto ch
|
ast_based
|
<fim_prefix>umerate()) {
map.Add(PrintToString(block_id),
Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
for (auto [i, elem_id] : llvm::enumerate(block)) {
map.Add(llvm::itostr(i), Yaml::OutputScalar(elem_id));
}
}));
}
});
}
// Collects memory usage of members.
auto CollectMemUsage(MemUsage& mem_usage, llvm::StringRef label) const
-> void {
mem_usage.Collect(MemUsage::ConcatLabel(label, "values_"), values_);
mem_usage.Collect(MemUsage::ConcatLabel(label, "canonical_blocks_"),
canonical_blocks_, KeyContext(this));
}
auto size() const -> int { <fim_suffix> }
protected:
// Allocates a copy of the given data using our slab allocator.
auto AllocateCopy(ConstRefType data) -> RefType {
auto result = AllocateUninitialized(data.size());
std::uninitialized_copy(data.begin(), data.end(), result.begin());
return result;
}
// Allocates an uninitialized array using our slab allocator.
auto AllocateUninitialized(size_t size) -> RefType {
// We're not going to run a destructor, so ensure that's OK.
static_assert(std::is_trivially_destructible_v<ElementType>);
auto storage = static_cast<ElementType*>(
allocator_->Allocate(size * sizeof(ElementType), alignof(ElementType)));
return RefType(storage, size);
}
<fim_middle>return values_.size();<fim_end>
|
umerate()) {
map.Add(PrintToString(block_id),
Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
for (auto [i, elem_id] : llvm::enumerate(block)) {
map.Add(llvm::itostr(i), Yaml::OutputScalar(elem_id));
}
}));
}
});
}
// Collects memory usage of members.
auto CollectMemUsage(MemUsage& mem_usage, llvm::StringRef label) const
-> void {
mem_usage.Collect(MemUsage::ConcatLabel(label, "values_"), values_);
mem_usage.Collect(MemUsage::ConcatLabel(label, "canonical_blocks_"),
canonical_blocks_, KeyContext(this));
}
auto size() const -> int {
|
return values_.size();
|
}
protected:
// Allocates a copy of the given data using our slab allocator.
auto AllocateCopy(ConstRefType data) -> RefType {
auto result = AllocateUninitialized(data.size());
std::uninitialized_copy(data.begin(), data.end(), result.begin());
return result;
}
// Allocates an uninitialized array using our slab allocator.
auto AllocateUninitialized(size_t size) -> RefType {
// We're not going to run a destructor, so ensure that's OK.
static_assert(std::is_trivially_destructible_v<ElementType>);
auto storage = static_cast<ElementType*>(
allocator_->Allocate(size * sizeof(ElementType), alignof(ElementType)));
return RefType(storage, size);
}
|
ast_based
|
<fim_prefix> gguf_file.read_raw(read_buf.data(), size);
ggml_backend_tensor_set(dev, read_buf.data(), 0, size);
};
for (auto & it : adapter.ab_map) {
auto orig = ab_map[it.first];
auto dev = it.second;
set_tensor(orig.a, dev.a);
set_tensor(orig.b, dev.b);
}
}
LLAMA_LOG_INFO("%s: loaded %zu tensors from lora file\n", __func__, adapter.ab_map.size()*2);
}
llama_adapter_lora * llama_adapter_lora_init(llama_model * model, const char * path_lora) {
llama_adapter_lora * adapter = new llama_adapter_lora();
try {
llama_adapter_lora_init_impl(*model, path_lora, *adapter);<fim_suffix> delete adapter;
}
return nullptr;
}
int32_t llama_adapter_meta_val_str(const llama_adapter_lora * adapter, const char * key, char * buf, size_t buf_size) {
const auto & it = adapter->gguf_kv.find(key);
if (it == adapter->gguf_kv.end()) {
if (buf_size > 0) {
buf[0] = '\0';
}
return -1;
}
return snprintf(buf, buf_size, "%s", it->second.c_str());
}
int32_t llama_adapter_meta_count(const llama_adapter_lora * adapter) {
return (int)adapter->gguf_kv.size();
}
int32_t llama_adapter_meta_key_by_index(const llama_adapter_lora * adapter, int i, char * buf, size_t buf_size) {<fim_middle> return adapter;
} catch (const std::exception & err) {
LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
<fim_end>
|
gguf_file.read_raw(read_buf.data(), size);
ggml_backend_tensor_set(dev, read_buf.data(), 0, size);
};
for (auto & it : adapter.ab_map) {
auto orig = ab_map[it.first];
auto dev = it.second;
set_tensor(orig.a, dev.a);
set_tensor(orig.b, dev.b);
}
}
LLAMA_LOG_INFO("%s: loaded %zu tensors from lora file\n", __func__, adapter.ab_map.size()*2);
}
llama_adapter_lora * llama_adapter_lora_init(llama_model * model, const char * path_lora) {
llama_adapter_lora * adapter = new llama_adapter_lora();
try {
llama_adapter_lora_init_impl(*model, path_lora, *adapter);
|
return adapter;
} catch (const std::exception & err) {
LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
|
delete adapter;
}
return nullptr;
}
int32_t llama_adapter_meta_val_str(const llama_adapter_lora * adapter, const char * key, char * buf, size_t buf_size) {
const auto & it = adapter->gguf_kv.find(key);
if (it == adapter->gguf_kv.end()) {
if (buf_size > 0) {
buf[0] = '\0';
}
return -1;
}
return snprintf(buf, buf_size, "%s", it->second.c_str());
}
int32_t llama_adapter_meta_count(const llama_adapter_lora * adapter) {
return (int)adapter->gguf_kv.size();
}
int32_t llama_adapter_meta_key_by_index(const llama_adapter_lora * adapter, int i, char * buf, size_t buf_size) {
|
random
|
<fim_prefix>float *) diff_tmp.data();
for (int ir = 0; ir < n_rows; ++ir) {
for (int ic = 0; ic < n_embd; ++ic) {
float f = arr[ir*n_embd + ic];
ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
}
}
} else {
// only copy
memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
}
v_diff.push_back(diff);
print_debug_tensor(diff);
// free memory of diff_tmp
diff_tmp.resize(0);
}
}
~train_context() {
for (auto ptr : v_final) free(ptr->data);
for (auto ptr : v_diff) <fim_suffix>;
// no need to free v_diff_tmp, since we didn't use malloc
ggml_free(ctx_ggml);
}
};
struct tokenized_prompt {
std::vector<llama_token> tokens_pos;
std::vector<llama_token> tokens_neg;
size_t max_seq_len;
tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
const llama_model * model = llama_get_model(ctx);
const llama_vocab * vocab = llama_model_get_vocab(model);
const bool add_bos = llama_vocab_get_add_bos(vocab);
tokens_pos = common_tokenize(ctx, pos, add_bos, true);
tokens_neg = common_tokenize(ctx, neg, add_bos, true);
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
<fim_middle>free(ptr->data)<fim_end>
|
float *) diff_tmp.data();
for (int ir = 0; ir < n_rows; ++ir) {
for (int ic = 0; ic < n_embd; ++ic) {
float f = arr[ir*n_embd + ic];
ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
}
}
} else {
// only copy
memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
}
v_diff.push_back(diff);
print_debug_tensor(diff);
// free memory of diff_tmp
diff_tmp.resize(0);
}
}
~train_context() {
for (auto ptr : v_final) free(ptr->data);
for (auto ptr : v_diff)
|
free(ptr->data)
|
;
// no need to free v_diff_tmp, since we didn't use malloc
ggml_free(ctx_ggml);
}
};
struct tokenized_prompt {
std::vector<llama_token> tokens_pos;
std::vector<llama_token> tokens_neg;
size_t max_seq_len;
tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
const llama_model * model = llama_get_model(ctx);
const llama_vocab * vocab = llama_model_get_vocab(model);
const bool add_bos = llama_vocab_get_add_bos(vocab);
tokens_pos = common_tokenize(ctx, pos, add_bos, true);
tokens_neg = common_tokenize(ctx, neg, add_bos, true);
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
|
ast_based
|
<fim_prefix>_set_interface.h>
#include <grpcpp/impl/completion_queue_tag.h>
#include <grpcpp/impl/rpc_method.h>
#include <grpcpp/impl/sync.h>
#include <grpcpp/support/client_interceptor.h>
#include <grpcpp/support/slice.h>
#include <atomic>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/util/grpc_check.h"
namespace grpc {
Channel::Channel(
const std::string& host, grpc_channel* channel,
std::vector<
std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
interceptor_creators)
: host_(host), c_channel_(channel) {
<fim_suffix>
}
Channel::~Channel() {
grpc_channel_destroy(c_channel_);
CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_relaxed);
if (callback_cq != nullptr) {
if (grpc_iomgr_run_in_background()) {
// gRPC-core provides the backing needed for the preferred CQ type
callback_cq->Shutdown();
} else {
CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq);
}
}
}
namespace {
inline grpc_slice SliceFromArray(const char* arr, size_t len) {
return grpc_slice_from_copied_buffer(arr, len);
}
std::string GetChannelInfoField(grpc_channel* channel,
grpc_channel_info* channel_info,
char***<fim_middle>interceptor_creators_ = std::move(interceptor_creators);<fim_end>
|
_set_interface.h>
#include <grpcpp/impl/completion_queue_tag.h>
#include <grpcpp/impl/rpc_method.h>
#include <grpcpp/impl/sync.h>
#include <grpcpp/support/client_interceptor.h>
#include <grpcpp/support/slice.h>
#include <atomic>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/util/grpc_check.h"
namespace grpc {
Channel::Channel(
const std::string& host, grpc_channel* channel,
std::vector<
std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
interceptor_creators)
: host_(host), c_channel_(channel) {
|
interceptor_creators_ = std::move(interceptor_creators);
|
}
Channel::~Channel() {
grpc_channel_destroy(c_channel_);
CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_relaxed);
if (callback_cq != nullptr) {
if (grpc_iomgr_run_in_background()) {
// gRPC-core provides the backing needed for the preferred CQ type
callback_cq->Shutdown();
} else {
CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq);
}
}
}
namespace {
inline grpc_slice SliceFromArray(const char* arr, size_t len) {
return grpc_slice_from_copied_buffer(arr, len);
}
std::string GetChannelInfoField(grpc_channel* channel,
grpc_channel_info* channel_info,
char***
|
ast_based
|
<fim_prefix> }
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
} else if (select_single_attempt != IntPair(-1, -1)) {
selection.clear();
set_animation_and_track(animation, select_single_attempt.first, read_only);
_select_at_anim(animation, select_single_attempt.first, animation->track_get_key_time(select_single_attempt.first, select_single_attempt.second), true);
}
moving_selection = false;
moving_selection_attempt = false;
moving_inserted_key = false;<fim_suffix> if (scaling_selection && mb.is_valid() && !read_only && !mb->is_pressed() && mb->get_button_index() == MouseButton::LEFT) {
if (std::abs(scaling_selection_scale.x - 1) > CMP_EPSILON || std::abs(scaling_selection_scale.y - 1) > CMP_EPSILON) {
// Scale it.
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Scale Bezier Points"));
List<AnimMoveRestore> to_restore;
List<Animation::HandleMode> to_restore_handle_modes;
// 1 - Remove the keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_do_method(animation.ptr(), "track_remove_key", E->get().first, E->get().second);
}<fim_middle> moving_selection_mouse_begin = Point2();
queue_redraw();
}
}
<fim_end>
|
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
} else if (select_single_attempt != IntPair(-1, -1)) {
selection.clear();
set_animation_and_track(animation, select_single_attempt.first, read_only);
_select_at_anim(animation, select_single_attempt.first, animation->track_get_key_time(select_single_attempt.first, select_single_attempt.second), true);
}
moving_selection = false;
moving_selection_attempt = false;
moving_inserted_key = false;
|
moving_selection_mouse_begin = Point2();
queue_redraw();
}
}
|
if (scaling_selection && mb.is_valid() && !read_only && !mb->is_pressed() && mb->get_button_index() == MouseButton::LEFT) {
if (std::abs(scaling_selection_scale.x - 1) > CMP_EPSILON || std::abs(scaling_selection_scale.y - 1) > CMP_EPSILON) {
// Scale it.
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Scale Bezier Points"));
List<AnimMoveRestore> to_restore;
List<Animation::HandleMode> to_restore_handle_modes;
// 1 - Remove the keys.
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_do_method(animation.ptr(), "track_remove_key", E->get().first, E->get().second);
}
|
random
|
<fim_prefix> return Init(datapath, 0, language, oem, configs, configs_size, vars_vec, vars_values,
set_only_non_debug_params, nullptr);
}
// In-memory version reads the traineddata file directly from the given
// data[data_size] array. Also implements the version with a datapath in data,
// flagged by data_size = 0.
int TessBaseAPI::Init(const char *data, int data_size, const char *language, OcrEngineMode oem,
char **configs, int configs_size, const std::vector<std::string> *vars_vec,
const std::vector<std::string> *vars_values, bool set_only_non_debug_params,
FileReader reader) {
if (language == nullptr) {<fim_suffix> }
if (data == nullptr) {
data = "";
}
std::string datapath = data_size == 0 ? data : language;
// If the datapath, OcrEngineMode or the language have changed - start again.
// Note that the language_ field stores the last requested language that was
// initialized successfully, while tesseract_->lang stores the language
// actually used. They differ only if the requested language was nullptr, in
// which case tesseract_->lang is set to the Tesseract default ("eng").
if (tesseract_ != nullptr &&
(datapath_.empty() || language_.empty() || datapath_ != datapath ||
last_oem_requested_ != oem || (language_ != language && tesseract_->lang != language))) {<fim_middle> language = "";<fim_end>
|
return Init(datapath, 0, language, oem, configs, configs_size, vars_vec, vars_values,
set_only_non_debug_params, nullptr);
}
// In-memory version reads the traineddata file directly from the given
// data[data_size] array. Also implements the version with a datapath in data,
// flagged by data_size = 0.
int TessBaseAPI::Init(const char *data, int data_size, const char *language, OcrEngineMode oem,
char **configs, int configs_size, const std::vector<std::string> *vars_vec,
const std::vector<std::string> *vars_values, bool set_only_non_debug_params,
FileReader reader) {
if (language == nullptr) {
|
language = "";
|
}
if (data == nullptr) {
data = "";
}
std::string datapath = data_size == 0 ? data : language;
// If the datapath, OcrEngineMode or the language have changed - start again.
// Note that the language_ field stores the last requested language that was
// initialized successfully, while tesseract_->lang stores the language
// actually used. They differ only if the requested language was nullptr, in
// which case tesseract_->lang is set to the Tesseract default ("eng").
if (tesseract_ != nullptr &&
(datapath_.empty() || language_.empty() || datapath_ != datapath ||
last_oem_requested_ != oem || (language_ != language && tesseract_->lang != language))) {
|
random
|
<fim_prefix> // apitest_example including disabled tests takes about 18 min on ARMv7.
const time_t MAX_SECONDS_FOR_TEST = 240;
#else
// Release builds typically need less than 10 s for OCR of a test image,
// apitest_example including disabled tests takes about 90 s on ARMv7.
const time_t MAX_SECONDS_FOR_TEST = 55;
#endif
const time_t end_time = time(nullptr);
EXPECT_TRUE(end_time - start_time_ <= MAX_SECONDS_FOR_TEST)
<< "The test took too long - " << ::testing::PrintToString(end_time - start_time_);
}
time_t start_time_;
};
void OCRTester(const char *imgname, const char *groundtruth, const char *tessdatadir,
const char *lang) {<fim_suffix> std::locale loc("C"); // You can also use "" for the default system locale
std::ifstream file(groundtruth);
file.imbue(loc); // Use it for file input
std::string gtText((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
auto api = std::make_unique<tesseract::TessBaseAPI>();
ASSERT_FALSE(api->Init(tessdatadir, lang)) << "Could not initialize tesseract.";
Image image = pixRead(imgname);
ASSERT_TRUE(image != nullptr) << "Failed to read test image.";
api->SetImage(image);
outText = api->GetUTF8Text();
EXPECT_EQ(gtText, outText) << "Phototest.tif OCR does not match ground truth for "
<< ::testing::PrintToString(lang);<fim_middle> // log.info() << tessdatadir << " for language: " << lang << std::endl;
char *outText;<fim_end>
|
// apitest_example including disabled tests takes about 18 min on ARMv7.
const time_t MAX_SECONDS_FOR_TEST = 240;
#else
// Release builds typically need less than 10 s for OCR of a test image,
// apitest_example including disabled tests takes about 90 s on ARMv7.
const time_t MAX_SECONDS_FOR_TEST = 55;
#endif
const time_t end_time = time(nullptr);
EXPECT_TRUE(end_time - start_time_ <= MAX_SECONDS_FOR_TEST)
<< "The test took too long - " << ::testing::PrintToString(end_time - start_time_);
}
time_t start_time_;
};
void OCRTester(const char *imgname, const char *groundtruth, const char *tessdatadir,
const char *lang) {
|
// log.info() << tessdatadir << " for language: " << lang << std::endl;
char *outText;
|
std::locale loc("C"); // You can also use "" for the default system locale
std::ifstream file(groundtruth);
file.imbue(loc); // Use it for file input
std::string gtText((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
auto api = std::make_unique<tesseract::TessBaseAPI>();
ASSERT_FALSE(api->Init(tessdatadir, lang)) << "Could not initialize tesseract.";
Image image = pixRead(imgname);
ASSERT_TRUE(image != nullptr) << "Failed to read test image.";
api->SetImage(image);
outText = api->GetUTF8Text();
EXPECT_EQ(gtText, outText) << "Phototest.tif OCR does not match ground truth for "
<< ::testing::PrintToString(lang);
|
random
|
<fim_prefix>il++) {
auto & diff_tmp = v_diff_tmp[il];
int n_elem = diff_tmp.size() / sizeof(float);
GGML_ASSERT(n_elem % n_embd == 0);
int n_rows = n_elem / n_embd;
struct ggml_tensor * diff = transpose
? ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd)
: ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_embd, n_rows);
ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
diff->data = malloc(ggml_nbytes(diff)); // TODO: get rid of this malloc if possible
if (transpose) {
// copy data & transpose
float * arr = (float *) <fim_suffix>;
for (int ir = 0; ir < n_rows; ++ir) {
for (int ic = 0; ic < n_embd; ++ic) {
float f = arr[ir*n_embd + ic];
ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
}
}
} else {
// only copy
memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
}
v_diff.push_back(diff);
print_debug_tensor(diff);
// free memory of diff_tmp
diff_tmp.resize(0);
}
}
~train_context() {
for (auto ptr : v_final) free(ptr->data);
for (auto ptr : v_diff) free(ptr->data);
<fim_middle>diff_tmp.data()<fim_end>
|
il++) {
auto & diff_tmp = v_diff_tmp[il];
int n_elem = diff_tmp.size() / sizeof(float);
GGML_ASSERT(n_elem % n_embd == 0);
int n_rows = n_elem / n_embd;
struct ggml_tensor * diff = transpose
? ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd)
: ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_embd, n_rows);
ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
diff->data = malloc(ggml_nbytes(diff)); // TODO: get rid of this malloc if possible
if (transpose) {
// copy data & transpose
float * arr = (float *)
|
diff_tmp.data()
|
;
for (int ir = 0; ir < n_rows; ++ir) {
for (int ic = 0; ic < n_embd; ++ic) {
float f = arr[ir*n_embd + ic];
ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
}
}
} else {
// only copy
memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
}
v_diff.push_back(diff);
print_debug_tensor(diff);
// free memory of diff_tmp
diff_tmp.resize(0);
}
}
~train_context() {
for (auto ptr : v_final) free(ptr->data);
for (auto ptr : v_diff) free(ptr->data);
|
ast_based
|
<fim_prefix>ection_handles_rect.size.width - selection_rect.size.width) / 4.0);
Point2 rel_pos;
// Calculate the scale according with the distance between the mouse's position (adjusted so that the cursor appears inside the handles)
// and the opposite end of the `selection_rect`.
if (scaling_selection_handles.x != 0) {
if (scaling_selection_handles.x == 1) { // Right Handle
const int handle_adjust = Math::round(mp.x - (scaling_selection_scale.x >= 0 ? selection_rect.position.x : (selection_rect.position.x + selection_rect.size.width)));
mp.x -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.x * SIGN(handle_adjust);
if (editor->is_snap_keys_enabled()) <fim_suffix>
mp.x = (mp.x - timeline->get_value()) * timeline->get_zoom_scale() + limit;
}
rel_pos.x = scaling_selection_scale.x >= 0 ? (mp.x - selection_rect.position.x) : selection_rect.position.x + selection_rect.size.width - mp.x;
} else { // Left Handle
const int handle_adjust = Math::round((scaling_selection_scale.x >= 0 ? (selection_rect.position.x + selection_rect.size.width) : selection_rect.position.x) - mp.x);
mp.x -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.x * SIGN(handle_adjust);
const float x = editor->snap_time((mp.x - limit) / timeline->get_zoom_scale(), true) + timeline->get_value();
if (editor->is_snap_keys_enabled()) {<fim_middle>{
mp.x = editor->snap_time((mp.x - limit) / timeline->get_zoom_scale(), true) + timeline->get_value();<fim_end>
|
ection_handles_rect.size.width - selection_rect.size.width) / 4.0);
Point2 rel_pos;
// Calculate the scale according with the distance between the mouse's position (adjusted so that the cursor appears inside the handles)
// and the opposite end of the `selection_rect`.
if (scaling_selection_handles.x != 0) {
if (scaling_selection_handles.x == 1) { // Right Handle
const int handle_adjust = Math::round(mp.x - (scaling_selection_scale.x >= 0 ? selection_rect.position.x : (selection_rect.position.x + selection_rect.size.width)));
mp.x -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.x * SIGN(handle_adjust);
if (editor->is_snap_keys_enabled())
|
{
mp.x = editor->snap_time((mp.x - limit) / timeline->get_zoom_scale(), true) + timeline->get_value();
|
mp.x = (mp.x - timeline->get_value()) * timeline->get_zoom_scale() + limit;
}
rel_pos.x = scaling_selection_scale.x >= 0 ? (mp.x - selection_rect.position.x) : selection_rect.position.x + selection_rect.size.width - mp.x;
} else { // Left Handle
const int handle_adjust = Math::round((scaling_selection_scale.x >= 0 ? (selection_rect.position.x + selection_rect.size.width) : selection_rect.position.x) - mp.x);
mp.x -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.x * SIGN(handle_adjust);
const float x = editor->snap_time((mp.x - limit) / timeline->get_zoom_scale(), true) + timeline->get_value();
if (editor->is_snap_keys_enabled()) {
|
ast_based
|
<fim_prefix> }
if (ae->node) {
accesskit_node_free(ae->node);
}
memdelete(ae);
rid_owner.free(p_id);
}
void AccessibilityDriverAccessKit::accessibility_free_element(const RID &p_id) {
ERR_FAIL_COND_MSG(in_accessibility_update, "Element can't be removed inside NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
if (ae) {
WindowData *wd = windows.getptr(ae->window_id);
AccessibilityElement *parent_ae = rid_owner.get_or_null(ae->parent);
if (parent_ae) {
parent_ae->children.erase(p_id);
}
_free_recursive(wd, p_id);
}
}
void AccessibilityDriverAccessKit::accessibility_element_set_meta(const RID &p_id, const Variant &p_meta) {
<fim_suffix>
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
ae->meta = p_meta;
}
Variant AccessibilityDriverAccessKit::accessibility_element_get_meta(const RID &p_id) const {
const AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL_V(ae, Variant());
return ae->meta;
}
void AccessibilityDriverAccessKit::accessibility_update_set_focus(const RID &p_id) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
if (p_id.is_valid() && rid_owner.owns(p_id)) {
focus = p_id;
} else {
focus = RID();
}
}
RID AccessibilityDriverAccessKit::accessibility_get_windo<fim_middle>ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");<fim_end>
|
}
if (ae->node) {
accesskit_node_free(ae->node);
}
memdelete(ae);
rid_owner.free(p_id);
}
void AccessibilityDriverAccessKit::accessibility_free_element(const RID &p_id) {
ERR_FAIL_COND_MSG(in_accessibility_update, "Element can't be removed inside NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
if (ae) {
WindowData *wd = windows.getptr(ae->window_id);
AccessibilityElement *parent_ae = rid_owner.get_or_null(ae->parent);
if (parent_ae) {
parent_ae->children.erase(p_id);
}
_free_recursive(wd, p_id);
}
}
void AccessibilityDriverAccessKit::accessibility_element_set_meta(const RID &p_id, const Variant &p_meta) {
|
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
|
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
ae->meta = p_meta;
}
Variant AccessibilityDriverAccessKit::accessibility_element_get_meta(const RID &p_id) const {
const AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL_V(ae, Variant());
return ae->meta;
}
void AccessibilityDriverAccessKit::accessibility_update_set_focus(const RID &p_id) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
if (p_id.is_valid() && rid_owner.owns(p_id)) {
focus = p_id;
} else {
focus = RID();
}
}
RID AccessibilityDriverAccessKit::accessibility_get_windo
|
ast_based
|
<fim_prefix>->node);
} else {
accesskit_node_clear_disabled(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_CLIPS_CHILDREN: {
if (p_value) {
accesskit_node_set_clips_children(ae->node);
} else {
accesskit_node_clear_clips_children(ae->node);
}
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_classname(const RID &p_id, const String &p_classname) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_classname.is_empty()) {
<fim_suffix>
} else {
accesskit_node_clear_class_name(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_placeholder(const RID &p_id, const String &p_placeholder) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_placeholder.is_empty()) {
accesskit_node_set_placeholder(ae->node, p_placeholder.utf8().ptr());
} else {
accesskit_node_clear_placeholder(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_language(const RID &p_id, const String <fim_middle>accesskit_node_set_class_name(ae->node, p_classname.utf8().ptr());<fim_end>
|
->node);
} else {
accesskit_node_clear_disabled(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_CLIPS_CHILDREN: {
if (p_value) {
accesskit_node_set_clips_children(ae->node);
} else {
accesskit_node_clear_clips_children(ae->node);
}
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_classname(const RID &p_id, const String &p_classname) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_classname.is_empty()) {
|
accesskit_node_set_class_name(ae->node, p_classname.utf8().ptr());
|
} else {
accesskit_node_clear_class_name(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_placeholder(const RID &p_id, const String &p_placeholder) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_placeholder.is_empty()) {
accesskit_node_set_placeholder(ae->node, p_placeholder.utf8().ptr());
} else {
accesskit_node_clear_placeholder(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_language(const RID &p_id, const String
|
ast_based
|
<fim_prefix> auto PeekArray() const -> llvm::ArrayRef<ValueT> {
CARBON_CHECK(!array_offsets_.empty());
return llvm::ArrayRef(values_).slice(array_offsets_.back());
}
auto PeekArray() -> llvm::MutableArrayRef<ValueT> {
CARBON_CHECK(!array_offsets_.empty());
return llvm::MutableArrayRef(values_).slice(array_offsets_.back());
}
// Returns the array at a specific index.
auto PeekArrayAt(int index) const -> llvm::ArrayRef<ValueT> {
auto ref = llvm::ArrayRef(values_).slice(array_offsets_[index]);
if (index + 1 < static_cast<int>(array_offsets_.size())) {
ref = ref.take_front(array_offsets_[index + 1] - array_offsets_[index]);
}
return ref;
}<fim_suffix> // Appends a value to the top array on the stack.
auto AppendToTop(const ValueT& value) -> void {
CARBON_CHECK(!array_offsets_.empty(),
"Must call PushArray before AppendToTop.");
values_.push_back(value);
}
// Appends a value to the top array on the stack.
auto AppendToTop(ValueT&& value) -> void {
CARBON_CHECK(!array_offsets_.empty(),
"Must call PushArray before AppendToTop.");
values_.push_back(std::move(value));
}
// Adds multiple values to the top array on the stack.
auto AppendToTop(llvm::ArrayRef<ValueT> values) -> void {
CARBON_CHECK(!array_offsets_.empty(),<fim_middle>
// Returns the full set of values on the stack, regardless of whether any
// arrays are pushed.
auto PeekAllValues() const -> llvm::ArrayRef<ValueT> { return values_; }
<fim_end>
|
auto PeekArray() const -> llvm::ArrayRef<ValueT> {
CARBON_CHECK(!array_offsets_.empty());
return llvm::ArrayRef(values_).slice(array_offsets_.back());
}
auto PeekArray() -> llvm::MutableArrayRef<ValueT> {
CARBON_CHECK(!array_offsets_.empty());
return llvm::MutableArrayRef(values_).slice(array_offsets_.back());
}
// Returns the array at a specific index.
auto PeekArrayAt(int index) const -> llvm::ArrayRef<ValueT> {
auto ref = llvm::ArrayRef(values_).slice(array_offsets_[index]);
if (index + 1 < static_cast<int>(array_offsets_.size())) {
ref = ref.take_front(array_offsets_[index + 1] - array_offsets_[index]);
}
return ref;
}
|
// Returns the full set of values on the stack, regardless of whether any
// arrays are pushed.
auto PeekAllValues() const -> llvm::ArrayRef<ValueT> { return values_; }
|
// Appends a value to the top array on the stack.
auto AppendToTop(const ValueT& value) -> void {
CARBON_CHECK(!array_offsets_.empty(),
"Must call PushArray before AppendToTop.");
values_.push_back(value);
}
// Appends a value to the top array on the stack.
auto AppendToTop(ValueT&& value) -> void {
CARBON_CHECK(!array_offsets_.empty(),
"Must call PushArray before AppendToTop.");
values_.push_back(std::move(value));
}
// Adds multiple values to the top array on the stack.
auto AppendToTop(llvm::ArrayRef<ValueT> values) -> void {
CARBON_CHECK(!array_offsets_.empty(),
|
random
|
<fim_prefix>n flags
nullptr, // Environment
nullptr, // Current directory
&si, // STARTUPINFO
&pi // PROCESS_INFORMATION
)) {
return EXIT_FAILURE;
}
// Close unnecessary pipe handles in the parent process
::CloseHandle(hChildStdoutWrite);
// Read the child process's stdout and stderr and redirect them to the parent's stdout
DWORD bytesRead;
std::array<char, 4096> buffer;
while (true) {
// Read from stdout
if (::ReadFile(hChildStdoutRead, buffer.data(), buffer.size(), &bytesRead, nullptr)) {
// Write to the parent's stdout
<fim_suffix>
} else {
break;
}
}
// Wait for the child process to exit
::WaitForSingleObject(pi.hProcess, INFINITE);
// Clean up
::CloseHandle(pi.hProcess);
::CloseHandle(pi.hThread);
::CloseHandle(hChildStdoutRead);
return EXIT_SUCCESS;
}
int main() {
setupConsoleWindow();
return launchExecutable();
}<fim_middle>if (bytesRead > 0)
::WriteFile(GetStdHandle(STD_OUTPUT_HANDLE), buffer.data(), bytesRead, &bytesRead, nullptr);<fim_end>
|
n flags
nullptr, // Environment
nullptr, // Current directory
&si, // STARTUPINFO
&pi // PROCESS_INFORMATION
)) {
return EXIT_FAILURE;
}
// Close unnecessary pipe handles in the parent process
::CloseHandle(hChildStdoutWrite);
// Read the child process's stdout and stderr and redirect them to the parent's stdout
DWORD bytesRead;
std::array<char, 4096> buffer;
while (true) {
// Read from stdout
if (::ReadFile(hChildStdoutRead, buffer.data(), buffer.size(), &bytesRead, nullptr)) {
// Write to the parent's stdout
|
if (bytesRead > 0)
::WriteFile(GetStdHandle(STD_OUTPUT_HANDLE), buffer.data(), bytesRead, &bytesRead, nullptr);
|
} else {
break;
}
}
// Wait for the child process to exit
::WaitForSingleObject(pi.hProcess, INFINITE);
// Clean up
::CloseHandle(pi.hProcess);
::CloseHandle(pi.hThread);
::CloseHandle(hChildStdoutRead);
return EXIT_SUCCESS;
}
int main() {
setupConsoleWindow();
return launchExecutable();
}
|
ast_based
|
<fim_prefix>nclude <filesystem>
#include <stdexcept>
#include <string>
#include <utility>
namespace fs = std::filesystem;
class Dlhandle {
void *chandle = nullptr;
public:
class Exception : public std::runtime_error {
public:
using std::runtime_error::runtime_error;
};
Dlhandle() = default;
Dlhandle(const fs::path &fpath);
Dlhandle(const Dlhandle &o) = delete;
Dlhandle(Dlhandle &&o)
: chandle(o.chandle)
{
o.chandle = nullptr;
}
~Dlhandle();
Dlhandle &operator=(Dlhandle &&o) {
chandle = std::exchange(o.chandle, nullptr);
return *this;
}
template <typename T>
T *get(const std::string &symbol) const <fim_suffix>
auto get_fnc(const std::string &symbol) const {
return get<void*(...)>(symbol);
}
private:
void *get_internal(const char *symbol) const;
};
<fim_middle>{
return reinterpret_cast<T *>(get_internal(symbol.c_str()));
}<fim_end>
|
nclude <filesystem>
#include <stdexcept>
#include <string>
#include <utility>
namespace fs = std::filesystem;
class Dlhandle {
void *chandle = nullptr;
public:
class Exception : public std::runtime_error {
public:
using std::runtime_error::runtime_error;
};
Dlhandle() = default;
Dlhandle(const fs::path &fpath);
Dlhandle(const Dlhandle &o) = delete;
Dlhandle(Dlhandle &&o)
: chandle(o.chandle)
{
o.chandle = nullptr;
}
~Dlhandle();
Dlhandle &operator=(Dlhandle &&o) {
chandle = std::exchange(o.chandle, nullptr);
return *this;
}
template <typename T>
T *get(const std::string &symbol) const
|
{
return reinterpret_cast<T *>(get_internal(symbol.c_str()));
}
|
auto get_fnc(const std::string &symbol) const {
return get<void*(...)>(symbol);
}
private:
void *get_internal(const char *symbol) const;
};
|
ast_based
|
<fim_prefix>_mem);
}
}
void AndroidInputHandler::process_key_event(int p_physical_keycode, int p_unicode, int p_key_label, bool p_pressed, bool p_echo) {
static char32_t prev_wc = 0;
char32_t unicode = p_unicode;
if ((p_unicode & 0xfffffc00) == 0xd800) {
if (prev_wc != 0) {
ERR_PRINT("invalid utf16 surrogate input");
}
prev_wc = unicode;
return; // Skip surrogate.
} else if ((unicode & 0xfffffc00) == 0xdc00) {
if (prev_wc == 0) {
ERR_PRINT("invalid utf16 surrogate input");
return; // Skip invalid surrogate.
}
unicode = (prev_wc << 10UL) + unicode - ((0xd800 << 10UL) + 0xdc00 - 0x10000);
prev_wc = 0;
} else {
prev_wc = 0;
}
Ref<InputEventKey> ev;
ev.instantiate();
<fim_suffix>
Key keycode;
if (unicode == '\b') { // 0x08
keycode = Key::BACKSPACE;
} else if (unicode == '\t') { // 0x09
keycode = Key::TAB;
} else if (unicode == '\n') { // 0x0A
keycode = Key::ENTER;
} else if (unicode == 0x1B) {
keycode = Key::ESCAPE;
} else if (unicode == 0x1F) {
keycode = Key::KEY_DELETE;
} else {
keycode = fix_keycode(unicode, physical_keycode);
}
switch (physical_keycode) {
case Key::SHIFT: {
shift_mem = p_pressed;
} break;
case Key::ALT: {
alt_mem = p_pressed;
} break;
case Key::CTRL: {
control_mem = p_pressed;
} break;
case Key::META: {
meta_mem = p_pressed;
} break;
default:
break;
}
ev->set_keycode(keycode);
ev->set_ph<fim_middle>Key physical_keycode = godot_code_from_android_code(p_physical_keycode);<fim_end>
|
_mem);
}
}
void AndroidInputHandler::process_key_event(int p_physical_keycode, int p_unicode, int p_key_label, bool p_pressed, bool p_echo) {
static char32_t prev_wc = 0;
char32_t unicode = p_unicode;
if ((p_unicode & 0xfffffc00) == 0xd800) {
if (prev_wc != 0) {
ERR_PRINT("invalid utf16 surrogate input");
}
prev_wc = unicode;
return; // Skip surrogate.
} else if ((unicode & 0xfffffc00) == 0xdc00) {
if (prev_wc == 0) {
ERR_PRINT("invalid utf16 surrogate input");
return; // Skip invalid surrogate.
}
unicode = (prev_wc << 10UL) + unicode - ((0xd800 << 10UL) + 0xdc00 - 0x10000);
prev_wc = 0;
} else {
prev_wc = 0;
}
Ref<InputEventKey> ev;
ev.instantiate();
|
Key physical_keycode = godot_code_from_android_code(p_physical_keycode);
|
Key keycode;
if (unicode == '\b') { // 0x08
keycode = Key::BACKSPACE;
} else if (unicode == '\t') { // 0x09
keycode = Key::TAB;
} else if (unicode == '\n') { // 0x0A
keycode = Key::ENTER;
} else if (unicode == 0x1B) {
keycode = Key::ESCAPE;
} else if (unicode == 0x1F) {
keycode = Key::KEY_DELETE;
} else {
keycode = fix_keycode(unicode, physical_keycode);
}
switch (physical_keycode) {
case Key::SHIFT: {
shift_mem = p_pressed;
} break;
case Key::ALT: {
alt_mem = p_pressed;
} break;
case Key::CTRL: {
control_mem = p_pressed;
} break;
case Key::META: {
meta_mem = p_pressed;
} break;
default:
break;
}
ev->set_keycode(keycode);
ev->set_ph
|
ast_based
|
<fim_prefix> float c = (t - low_pos.x) / (high_pos.x - low_pos.x);
h = low_pos.lerp(high_pos, c).y;
}
h = _bezier_h_to_pixel(h);
Vector2 pos(j, h);
if (j > from_x) {
lines.push_back(prev_pos);
lines.push_back(pos);
}
prev_pos = pos;
}
if (lines.size() >= 2) {
draw_multiline(lines, p_color, Math::round(EDSCALE), true);
}
}
}
void AnimationBezierTrackEdit::_draw_line_clipped(const Vector2 &p_from, const Vector2 &p_to, const Color &p_color, int p_clip_left, int p_clip_right) {
Vector2 from = p_from;
Vector2 to = p_to;
if (from.x == to.x && from.y == to.y) {
return;
}
if (to.x < from.x) {
SWAP(to, from);
}
if (to.x < p_clip_left) {
return;
}
<fim_suffix>
if (to.x > p_clip_right) {
float c = (p_clip_right - from.x) / (to.x - from.x);
to = from.lerp(to, c);
}
if (from.x < p_clip_left) {
float c = (p_clip_left - from.x) / (to.x - from.x);
from = from.lerp(to, c);
}
draw_line(from, to, p_color, Math::round(EDSCALE), true);
}
void AnimationBezierTrackEdit::_notification(int p_what) {
switch (p_what) {
case EditorSettings::NOTIFICATION_EDITOR_SETTINGS_CHANGED: {
if (EditorSettings::get_singleton()->check_changed_settings_in_group("editors/panning")) {
panner->setup((ViewPanner::ControlScheme)EDITOR_GET("editors/panning/animation_editors_panning_scheme").operator int(), ED_GET_SHORTCUT("canvas_item_editor/pan_view"), boo<fim_middle>if (from.x > p_clip_right) {
return;
}<fim_end>
|
float c = (t - low_pos.x) / (high_pos.x - low_pos.x);
h = low_pos.lerp(high_pos, c).y;
}
h = _bezier_h_to_pixel(h);
Vector2 pos(j, h);
if (j > from_x) {
lines.push_back(prev_pos);
lines.push_back(pos);
}
prev_pos = pos;
}
if (lines.size() >= 2) {
draw_multiline(lines, p_color, Math::round(EDSCALE), true);
}
}
}
void AnimationBezierTrackEdit::_draw_line_clipped(const Vector2 &p_from, const Vector2 &p_to, const Color &p_color, int p_clip_left, int p_clip_right) {
Vector2 from = p_from;
Vector2 to = p_to;
if (from.x == to.x && from.y == to.y) {
return;
}
if (to.x < from.x) {
SWAP(to, from);
}
if (to.x < p_clip_left) {
return;
}
|
if (from.x > p_clip_right) {
return;
}
|
if (to.x > p_clip_right) {
float c = (p_clip_right - from.x) / (to.x - from.x);
to = from.lerp(to, c);
}
if (from.x < p_clip_left) {
float c = (p_clip_left - from.x) / (to.x - from.x);
from = from.lerp(to, c);
}
draw_line(from, to, p_color, Math::round(EDSCALE), true);
}
void AnimationBezierTrackEdit::_notification(int p_what) {
switch (p_what) {
case EditorSettings::NOTIFICATION_EDITOR_SETTINGS_CHANGED: {
if (EditorSettings::get_singleton()->check_changed_settings_in_group("editors/panning")) {
panner->setup((ViewPanner::ControlScheme)EDITOR_GET("editors/panning/animation_editors_panning_scheme").operator int(), ED_GET_SHORTCUT("canvas_item_editor/pan_view"), boo
|
ast_based
|
<fim_prefix> else
{
for(int i = 0; i < ctx->channels; ++i)
ctx->borderValues[i] = (CAROTENE_NS::u8)cv::saturate_cast<uchar>(borderValue[i]);
}
break;
case CV_HAL_BORDER_REPLICATE:
ctx->border = CAROTENE_NS::BORDER_MODE_REPLICATE;
break;
case CV_HAL_BORDER_REFLECT:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT;
break;
case CV_HAL_BORDER_WRAP:
ctx->border = CAROTENE_NS::BORDER_MODE_WRAP;
break;
case CV_HAL_BORDER_REFLECT_101:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT101;
break;
default:
delete ctx;
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
<fim_suffix>;
return CV_HAL_ERROR_OK;
}
inline int TEGRA_MORPHFREE(cvhalFilter2D *context)
{
if(context)
{
delete (MorphCtx*)context;
return CV_HAL_ERROR_OK;
}
else
{
return CV_HAL_ERROR_UNKNOWN;
}
}
#define TEGRA_MORPHIMPL(context, src_data, src_step, dst_data, dst_step, width, height, src_full_width, src_full_height, src_roi_x, src_roi_y, dst_full_width, dst_full_height, dst_roi_x, dst_roi_y) \
( \
(void)dst_full_width, (void)dst_full_height, (void)dst_roi_x, (void)dst_roi_y, \
context && CAROTENE_NS::isSupportedConfiguration() ? \
((MorphCtx*)context)->operation == CV_HAL_MORPH_ERODE ? \
CAROTENE_NS::erode(CAROTENE_NS::Size2D<fim_middle>*context = (cvhalFilter2D*)(ctx)<fim_end>
|
else
{
for(int i = 0; i < ctx->channels; ++i)
ctx->borderValues[i] = (CAROTENE_NS::u8)cv::saturate_cast<uchar>(borderValue[i]);
}
break;
case CV_HAL_BORDER_REPLICATE:
ctx->border = CAROTENE_NS::BORDER_MODE_REPLICATE;
break;
case CV_HAL_BORDER_REFLECT:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT;
break;
case CV_HAL_BORDER_WRAP:
ctx->border = CAROTENE_NS::BORDER_MODE_WRAP;
break;
case CV_HAL_BORDER_REFLECT_101:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT101;
break;
default:
delete ctx;
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
|
*context = (cvhalFilter2D*)(ctx)
|
;
return CV_HAL_ERROR_OK;
}
inline int TEGRA_MORPHFREE(cvhalFilter2D *context)
{
if(context)
{
delete (MorphCtx*)context;
return CV_HAL_ERROR_OK;
}
else
{
return CV_HAL_ERROR_UNKNOWN;
}
}
#define TEGRA_MORPHIMPL(context, src_data, src_step, dst_data, dst_step, width, height, src_full_width, src_full_height, src_roi_x, src_roi_y, dst_full_width, dst_full_height, dst_roi_x, dst_roi_y) \
( \
(void)dst_full_width, (void)dst_full_height, (void)dst_roi_x, (void)dst_roi_y, \
context && CAROTENE_NS::isSupportedConfiguration() ? \
((MorphCtx*)context)->operation == CV_HAL_MORPH_ERODE ? \
CAROTENE_NS::erode(CAROTENE_NS::Size2D
|
ast_based
|
<fim_prefix> std::vector<llama_token> tokens_pos;
std::vector<llama_token> tokens_neg;
size_t max_seq_len;
tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
const llama_model * model = llama_get_model(ctx);
const llama_vocab * vocab = llama_model_get_vocab(model);
const bool add_bos = llama_vocab_get_add_bos(vocab);
tokens_pos = common_tokenize(ctx, pos, add_bos, true);
tokens_neg = common_tokenize(ctx, neg, add_bos, true);
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
padding_seq(ctx, tokens_pos, max_seq_len);
padding_seq(ctx, tokens_neg, max_seq_len);
}
<fim_suffix> std::vector<llama_token> pad_tokens = common_tokenize(ctx, " ", false);
llama_token pad_tok = pad_tokens.back();
while (tokens.size() < len) {
tokens.push_back(pad_tok);
}
}
};
//////////////////////////////////////////////////
template <typename T>
static std::string to_string(const T & val) {
std::stringstream ss;
ss << val;
return ss.str();
}
static std::vector<std::string> ctrlvec_load_prompt_file(std::string path, bool skip_empty_lines) {
std::vector<std::string> output;
std::ifstream file(path);
if (!file.is_open()) {
fprintf(stderr, "error: unable to open file: %s\n", path.c_str());
exit(1);<fim_middle> void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
// TODO: customize padding token<fim_end>
|
std::vector<llama_token> tokens_pos;
std::vector<llama_token> tokens_neg;
size_t max_seq_len;
tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
const llama_model * model = llama_get_model(ctx);
const llama_vocab * vocab = llama_model_get_vocab(model);
const bool add_bos = llama_vocab_get_add_bos(vocab);
tokens_pos = common_tokenize(ctx, pos, add_bos, true);
tokens_neg = common_tokenize(ctx, neg, add_bos, true);
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
padding_seq(ctx, tokens_pos, max_seq_len);
padding_seq(ctx, tokens_neg, max_seq_len);
}
|
void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
// TODO: customize padding token
|
std::vector<llama_token> pad_tokens = common_tokenize(ctx, " ", false);
llama_token pad_tok = pad_tokens.back();
while (tokens.size() < len) {
tokens.push_back(pad_tok);
}
}
};
//////////////////////////////////////////////////
template <typename T>
static std::string to_string(const T & val) {
std::stringstream ss;
ss << val;
return ss.str();
}
static std::vector<std::string> ctrlvec_load_prompt_file(std::string path, bool skip_empty_lines) {
std::vector<std::string> output;
std::ifstream file(path);
if (!file.is_open()) {
fprintf(stderr, "error: unable to open file: %s\n", path.c_str());
exit(1);
|
random
|
<fim_prefix> src2, sz2, \
dst, sz), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
#define TEGRA_AND(src1, sz1, src2, sz2, dst, sz, w, h) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
CAROTENE_NS::bitwiseAnd(CAROTENE_NS::Size2D(w, h), \
src1, sz1, \
src2, sz2, \
dst, sz), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
#define TEGRA_OR(src1, sz1, src2, sz2, dst, sz, w, h) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
CAROTENE_NS::bitwiseOr(CAROTENE_NS::Size2D(w, h), \
src1, sz1, \
src2, sz2, \<fim_suffix>#define TEGRA_XOR(src1, sz1, src2, sz2, dst, sz, w, h) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
CAROTENE_NS::bitwiseXor(CAROTENE_NS::Size2D(w, h), \
src1, sz1, \
src2, sz2, \
dst, sz), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
#define TEGRA_NOT(src1, sz1, dst, sz, w, h) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
CAROTENE_NS::bitwiseNot(CAROTENE_NS::Size2D(w, h), \
src1, sz1, \
dst, sz), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
#undef cv_hal_add8u
#define cv_hal_add8u TEGRA_ADD
#undef cv_hal_add8s<fim_middle> dst, sz), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
<fim_end>
|
src2, sz2, \
dst, sz), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
#define TEGRA_AND(src1, sz1, src2, sz2, dst, sz, w, h) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
CAROTENE_NS::bitwiseAnd(CAROTENE_NS::Size2D(w, h), \
src1, sz1, \
src2, sz2, \
dst, sz), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
#define TEGRA_OR(src1, sz1, src2, sz2, dst, sz, w, h) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
CAROTENE_NS::bitwiseOr(CAROTENE_NS::Size2D(w, h), \
src1, sz1, \
src2, sz2, \
|
dst, sz), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
|
#define TEGRA_XOR(src1, sz1, src2, sz2, dst, sz, w, h) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
CAROTENE_NS::bitwiseXor(CAROTENE_NS::Size2D(w, h), \
src1, sz1, \
src2, sz2, \
dst, sz), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
#define TEGRA_NOT(src1, sz1, dst, sz, w, h) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
CAROTENE_NS::bitwiseNot(CAROTENE_NS::Size2D(w, h), \
src1, sz1, \
dst, sz), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
#undef cv_hal_add8u
#define cv_hal_add8u TEGRA_ADD
#undef cv_hal_add8s
|
random
|
<fim_prefix>int ngl) = 0;
virtual size_t stateSize() const = 0;
virtual size_t saveState(std::span<uint8_t> stateOut, std::vector<Token> &inputTokensOut) const = 0;
virtual size_t restoreState(std::span<const uint8_t> state, std::span<const Token> inputTokens) = 0;
// This method requires the model to return true from supportsCompletion otherwise it will throw
// an error
virtual void prompt(std::string_view prompt,
const PromptCallback &promptCallback,
const ResponseCallback &responseCallback,
const PromptContext &ctx);
virtual int32_t countPromptTokens(std::string_view prompt) const;
<fim_suffix>
// user-specified prefix
virtual void embed(const std::vector<std::string> &texts, float *embeddings, std::optional<std::string> prefix,
int dimensionality = -1, size_t *tokenCount = nullptr, bool doMean = true, bool atlas = false,
EmbedCancelCallback *cancelCb = nullptr);
// automatic prefix
virtual void embed(const std::vector<std::string> &texts, float *embeddings, bool isRetrieval,
int dimensionality = -1, size_t *tokenCount = nullptr, bool doMean = true, bool atlas = false);
virtual void setThreadCount(int32_t n_threads) { (void)n_threads; }
virtual int32_t threadCount() const { return 1; }
<fim_middle>virtual size_t embeddingSize() const {
throw std::logic_error(std::string(implementation().modelType()) + " does not support embeddings");
}<fim_end>
|
int ngl) = 0;
virtual size_t stateSize() const = 0;
virtual size_t saveState(std::span<uint8_t> stateOut, std::vector<Token> &inputTokensOut) const = 0;
virtual size_t restoreState(std::span<const uint8_t> state, std::span<const Token> inputTokens) = 0;
// This method requires the model to return true from supportsCompletion otherwise it will throw
// an error
virtual void prompt(std::string_view prompt,
const PromptCallback &promptCallback,
const ResponseCallback &responseCallback,
const PromptContext &ctx);
virtual int32_t countPromptTokens(std::string_view prompt) const;
|
virtual size_t embeddingSize() const {
throw std::logic_error(std::string(implementation().modelType()) + " does not support embeddings");
}
|
// user-specified prefix
virtual void embed(const std::vector<std::string> &texts, float *embeddings, std::optional<std::string> prefix,
int dimensionality = -1, size_t *tokenCount = nullptr, bool doMean = true, bool atlas = false,
EmbedCancelCallback *cancelCb = nullptr);
// automatic prefix
virtual void embed(const std::vector<std::string> &texts, float *embeddings, bool isRetrieval,
int dimensionality = -1, size_t *tokenCount = nullptr, bool doMean = true, bool atlas = false);
virtual void setThreadCount(int32_t n_threads) { (void)n_threads; }
virtual int32_t threadCount() const { return 1; }
|
ast_based
|
<fim_prefix>/text_line.h"
#include <climits>
float AnimationBezierTrackEdit::_bezier_h_to_pixel(float p_h) {
float h = p_h;
h = (h - timeline_v_scroll) / timeline_v_zoom;
h = (get_size().height / 2.0) - h;
return h;
}
void AnimationBezierTrackEdit::_draw_track(int p_track, const Color &p_color) {
float scale = timeline->get_zoom_scale();
int limit = timeline->get_name_limit();
int right_limit = get_size().width;
// Selection may have altered the order of keys.
RBMap<real_t, int> key_order;
for (int i = 0; i < animation->track_get_key_count(p_track); i++) {
real_t ofs = animation->track_get_key_time(p_track, i);
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
<fim_suffix>
} else if (scaling_selection) {
ofs += -scaling_selection_offset.x + (ofs - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
}
}
key_order[ofs] = i;
}
for (RBMap<real_t, int>::Element *E = key_order.front(); E; E = E->next()) {
int i = E->get();
if (!E->next()) {
break;
}
int i_n = E->next()->get();
float offset = animation->track_get_key_time(p_track, i);
float height = animation->bezier_track_get_key_value(p_track, i);
Vector2 out_handle = animation->bezier_track_get_key_out_handle(p_track, i);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i) {
out_handle = moving_handle_rig<fim_middle>ofs += moving_selection_offset.x;<fim_end>
|
/text_line.h"
#include <climits>
float AnimationBezierTrackEdit::_bezier_h_to_pixel(float p_h) {
float h = p_h;
h = (h - timeline_v_scroll) / timeline_v_zoom;
h = (get_size().height / 2.0) - h;
return h;
}
void AnimationBezierTrackEdit::_draw_track(int p_track, const Color &p_color) {
float scale = timeline->get_zoom_scale();
int limit = timeline->get_name_limit();
int right_limit = get_size().width;
// Selection may have altered the order of keys.
RBMap<real_t, int> key_order;
for (int i = 0; i < animation->track_get_key_count(p_track); i++) {
real_t ofs = animation->track_get_key_time(p_track, i);
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
|
ofs += moving_selection_offset.x;
|
} else if (scaling_selection) {
ofs += -scaling_selection_offset.x + (ofs - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
}
}
key_order[ofs] = i;
}
for (RBMap<real_t, int>::Element *E = key_order.front(); E; E = E->next()) {
int i = E->get();
if (!E->next()) {
break;
}
int i_n = E->next()->get();
float offset = animation->track_get_key_time(p_track, i);
float height = animation->bezier_track_get_key_value(p_track, i);
Vector2 out_handle = animation->bezier_track_get_key_out_handle(p_track, i);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i) {
out_handle = moving_handle_rig
|
ast_based
|
<fim_prefix>Y_PASTE: {
paste_keys(time, true);
} break;
case MENU_KEY_SET_HANDLE_FREE: {
_change_selected_keys_handle_mode(Animation::HANDLE_MODE_FREE);
} break;
case MENU_KEY_SET_HANDLE_LINEAR: {
_change_selected_keys_handle_mode(Animation::HANDLE_MODE_LINEAR);
} break;
case MENU_KEY_SET_HANDLE_BALANCED: {
_change_selected_keys_handle_mode(Animation::HANDLE_MODE_BALANCED);
} break;
case MENU_KEY_SET_HANDLE_MIRRORED: {
_change_selected_keys_handle_mode(Animation::HANDLE_MODE_MIRRORED);
} break;
case MENU_KEY_SET_HANDLE_AUTO_BALANCED: {
_change_selected_keys_handle_mode(Animation::HANDLE_MODE_BALANCED, true);
} break;
case MENU_KEY_SET_HANDLE_AUTO_MIRRORED: {
<fim_suffix>;
} break;
}
}
void AnimationBezierTrackEdit::duplicate_selected_keys(real_t p_ofs, bool p_ofs_valid) {
if (selection.is_empty()) {
return;
}
real_t top_time = 1e10;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t t = animation->track_get_key_time(E->get().first, E->get().second);
if (t < top_time) {
top_time = t;
}
}
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Animation Duplicate Keys"));
List<Pair<int, real_t>> new_selection_values;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t t = animation->track_get_key_time(E->get().first, E->get().secon<fim_middle>_change_selected_keys_handle_mode(Animation::HANDLE_MODE_MIRRORED, true)<fim_end>
|
Y_PASTE: {
paste_keys(time, true);
} break;
case MENU_KEY_SET_HANDLE_FREE: {
_change_selected_keys_handle_mode(Animation::HANDLE_MODE_FREE);
} break;
case MENU_KEY_SET_HANDLE_LINEAR: {
_change_selected_keys_handle_mode(Animation::HANDLE_MODE_LINEAR);
} break;
case MENU_KEY_SET_HANDLE_BALANCED: {
_change_selected_keys_handle_mode(Animation::HANDLE_MODE_BALANCED);
} break;
case MENU_KEY_SET_HANDLE_MIRRORED: {
_change_selected_keys_handle_mode(Animation::HANDLE_MODE_MIRRORED);
} break;
case MENU_KEY_SET_HANDLE_AUTO_BALANCED: {
_change_selected_keys_handle_mode(Animation::HANDLE_MODE_BALANCED, true);
} break;
case MENU_KEY_SET_HANDLE_AUTO_MIRRORED: {
|
_change_selected_keys_handle_mode(Animation::HANDLE_MODE_MIRRORED, true)
|
;
} break;
}
}
void AnimationBezierTrackEdit::duplicate_selected_keys(real_t p_ofs, bool p_ofs_valid) {
if (selection.is_empty()) {
return;
}
real_t top_time = 1e10;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t t = animation->track_get_key_time(E->get().first, E->get().second);
if (t < top_time) {
top_time = t;
}
}
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Animation Duplicate Keys"));
List<Pair<int, real_t>> new_selection_values;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t t = animation->track_get_key_time(E->get().first, E->get().secon
|
ast_based
|