| text (stringlengths 435-1.65k) | prefix (stringlengths 51-700) | middle (stringlengths 10-200) | suffix (stringlengths 50-700) | type (stringclasses, 2 values) |
|---|---|---|---|---|
<fim_prefix> convert_weights_ak_to_gg(layer.wv , &w->wv[i*row_length*row_length/n_multiqueries]);
convert_weights_ak_to_gg(layer.w1 , &w->w1[i*row_length*n_ff]);
convert_weights_ak_to_gg(layer.w2 , &w->w2[i*n_ff*row_length]);
convert_weights_ak_to_gg(layer.w3 , &w->w3[i*row_length*n_ff]);
}
struct gguf_context * ctx = gguf_init_empty();
std::vector<const char*> tokens;
std::vector<float> scores;
std::vector<llama_token_type> token_types;
for (const my_llama_vocab::token_data & token_data : vocab->id_to_token) {<fim_suffix> scores.push_back(token_data.score);
token_types.push_back(token_data.type);
}
gguf_set_arr_str(ctx, KV_TOKENIZER_LIST, tokens.data(), tokens.size());
gguf_set_arr_data(ctx, KV_TOKENIZER_SCORES, GGUF_TYPE_FLOAT32, scores.data(), scores.size());
gguf_set_arr_data(ctx, KV_TOKENIZER_TOKEN_TYPE, GGUF_TYPE_INT32, token_types.data(), token_types.size());
gguf_set_val_str(ctx, KV_TOKENIZER_MODEL, TOKENIZER_NAME);
gguf_set_val_str(ctx, KV_GENERAL_ARCHITECTURE, "llama");
gguf_set_val_str(ctx, KV_GENERAL_NAME, "llama");
// special tokens
gguf_set_val_u32(ctx, KV_TOKENIZER_UNK_ID, UNKNOWN_TOKEN_ID);<fim_middle> tokens.push_back(token_data.text.c_str());<fim_end>
|
convert_weights_ak_to_gg(layer.wv , &w->wv[i*row_length*row_length/n_multiqueries]);
convert_weights_ak_to_gg(layer.w1 , &w->w1[i*row_length*n_ff]);
convert_weights_ak_to_gg(layer.w2 , &w->w2[i*n_ff*row_length]);
convert_weights_ak_to_gg(layer.w3 , &w->w3[i*row_length*n_ff]);
}
struct gguf_context * ctx = gguf_init_empty();
std::vector<const char*> tokens;
std::vector<float> scores;
std::vector<llama_token_type> token_types;
for (const my_llama_vocab::token_data & token_data : vocab->id_to_token) {
|
tokens.push_back(token_data.text.c_str());
|
scores.push_back(token_data.score);
token_types.push_back(token_data.type);
}
gguf_set_arr_str(ctx, KV_TOKENIZER_LIST, tokens.data(), tokens.size());
gguf_set_arr_data(ctx, KV_TOKENIZER_SCORES, GGUF_TYPE_FLOAT32, scores.data(), scores.size());
gguf_set_arr_data(ctx, KV_TOKENIZER_TOKEN_TYPE, GGUF_TYPE_INT32, token_types.data(), token_types.size());
gguf_set_val_str(ctx, KV_TOKENIZER_MODEL, TOKENIZER_NAME);
gguf_set_val_str(ctx, KV_GENERAL_ARCHITECTURE, "llama");
gguf_set_val_str(ctx, KV_GENERAL_NAME, "llama");
// special tokens
gguf_set_val_u32(ctx, KV_TOKENIZER_UNK_ID, UNKNOWN_TOKEN_ID);
|
random
|
<fim_prefix>:vector<llama_seq_id> seq_ids(n_parallel, 0);
for (int32_t i = 0; i < n_parallel; ++i) {
seq_ids[i] = i;
}
// evaluate the initial prompt
for (size_t i = 0; i < tokens_list.size(); ++i) {
common_batch_add(batch, tokens_list[i], i, seq_ids, false);
}
GGML_ASSERT(batch.n_tokens == (int) tokens_list.size());
if (llama_model_has_encoder(model)) {
if (llama_encode(ctx, batch)) {
LOG_ERR("%s : failed to eval\n", __func__);
return 1;
}
llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
decoder_start_token_id = <fim_suffix>;
}
common_batch_clear(batch);
common_batch_add(batch, decoder_start_token_id, 0, seq_ids, false);
}
// llama_decode will output logits only for the last token of the prompt
batch.logits[batch.n_tokens - 1] = true;
if (llama_decode(ctx, batch) != 0) {
LOG_ERR("%s: llama_decode() failed\n", __func__);
return 1;
}
//// assign the system KV cache to all parallel sequences
//// this way, the parallel sequences will "reuse" the prompt tokens without having to copy them
//for (int32_t i = 1; i < n_parallel; ++i) {
// llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
//}
if (n_parallel > 1) {
LOG("\n\n%s: ge<fim_middle>llama_vocab_bos(vocab)<fim_end>
|
:vector<llama_seq_id> seq_ids(n_parallel, 0);
for (int32_t i = 0; i < n_parallel; ++i) {
seq_ids[i] = i;
}
// evaluate the initial prompt
for (size_t i = 0; i < tokens_list.size(); ++i) {
common_batch_add(batch, tokens_list[i], i, seq_ids, false);
}
GGML_ASSERT(batch.n_tokens == (int) tokens_list.size());
if (llama_model_has_encoder(model)) {
if (llama_encode(ctx, batch)) {
LOG_ERR("%s : failed to eval\n", __func__);
return 1;
}
llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
decoder_start_token_id =
|
llama_vocab_bos(vocab)
|
;
}
common_batch_clear(batch);
common_batch_add(batch, decoder_start_token_id, 0, seq_ids, false);
}
// llama_decode will output logits only for the last token of the prompt
batch.logits[batch.n_tokens - 1] = true;
if (llama_decode(ctx, batch) != 0) {
LOG_ERR("%s: llama_decode() failed\n", __func__);
return 1;
}
//// assign the system KV cache to all parallel sequences
//// this way, the parallel sequences will "reuse" the prompt tokens without having to copy them
//for (int32_t i = 1; i < n_parallel; ++i) {
// llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
//}
if (n_parallel > 1) {
LOG("\n\n%s: ge
|
ast_based
|
<fim_prefix> al_use_projection_transform(&last_projection_transform);
}
bool ImGui_ImplAllegro5_CreateDeviceObjects()
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
// Create an invisible mouse cursor
// Because al_hide_mouse_cursor() seems to mess up with the actual inputs..
ALLEGRO_BITMAP* mouse_cursor = al_create_bitmap(8, 8);
bd->MouseCursorInvisible = al_create_mouse_cursor(mouse_cursor, 0, 0);
al_destroy_bitmap(mouse_cursor);
return true;
}
void ImGui_ImplAllegro5_UpdateTexture(ImTextureData* tex)
{
if (tex->Status == ImTextureStatus_WantCreate)
{
// Create and upload new texture to graphics system<fim_suffix> IM_ASSERT(tex->TexID == ImTextureID_Invalid && tex->BackendUserData == nullptr);
IM_ASSERT(tex->Format == ImTextureFormat_RGBA32);
// Create texture
// (Bilinear sampling is required by default. Set 'io.Fonts->Flags |= ImFontAtlasFlags_NoBakedLines' or 'style.AntiAliasedLinesUseTex = false' to allow point/nearest sampling)
const int new_bitmap_flags = al_get_new_bitmap_flags();
int new_bitmap_format = al_get_new_bitmap_format();
al_set_new_bitmap_flags(ALLEGRO_MEMORY_BITMAP | ALLEGRO_MIN_LINEAR | ALLEGRO_MAG_LINEAR);
al_set_new_bitmap_format(ALLEGRO_PIXEL_FORMAT_ABGR_8888_LE);<fim_middle> //IMGUI_DEBUG_LOG("UpdateTexture #%03d: WantCreate %dx%d\n", tex->UniqueID, tex->Width, tex->Height);<fim_end>
|
al_use_projection_transform(&last_projection_transform);
}
bool ImGui_ImplAllegro5_CreateDeviceObjects()
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
// Create an invisible mouse cursor
// Because al_hide_mouse_cursor() seems to mess up with the actual inputs..
ALLEGRO_BITMAP* mouse_cursor = al_create_bitmap(8, 8);
bd->MouseCursorInvisible = al_create_mouse_cursor(mouse_cursor, 0, 0);
al_destroy_bitmap(mouse_cursor);
return true;
}
void ImGui_ImplAllegro5_UpdateTexture(ImTextureData* tex)
{
if (tex->Status == ImTextureStatus_WantCreate)
{
// Create and upload new texture to graphics system
|
//IMGUI_DEBUG_LOG("UpdateTexture #%03d: WantCreate %dx%d\n", tex->UniqueID, tex->Width, tex->Height);
|
IM_ASSERT(tex->TexID == ImTextureID_Invalid && tex->BackendUserData == nullptr);
IM_ASSERT(tex->Format == ImTextureFormat_RGBA32);
// Create texture
// (Bilinear sampling is required by default. Set 'io.Fonts->Flags |= ImFontAtlasFlags_NoBakedLines' or 'style.AntiAliasedLinesUseTex = false' to allow point/nearest sampling)
const int new_bitmap_flags = al_get_new_bitmap_flags();
int new_bitmap_format = al_get_new_bitmap_format();
al_set_new_bitmap_flags(ALLEGRO_MEMORY_BITMAP | ALLEGRO_MIN_LINEAR | ALLEGRO_MAG_LINEAR);
al_set_new_bitmap_format(ALLEGRO_PIXEL_FORMAT_ABGR_8888_LE);
|
random
|
<fim_prefix> RANGE_DATA(ST, src1_data, src1_step), src1_step, \
RANGE_DATA(DT, dst1_data, dst1_step), dst1_step, __VA_ARGS__)
#define TEGRA_UNARYOPSCALE(type, op, src1, sz1, dst, sz, w, h, scales) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
parallel_for_(cv::Range(0, h), \
TegraGenOp_##op##_Invoker<const type, type>(src1, sz1, dst, sz, w, h, scales), \
(w * h) / static_cast<double>(1<<16)), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
TegraUnaryOpScale_Invoker(recip, reciprocal, 1, scale, CAROTENE_NS::CONVERT_POLICY_SATURATE)
<fim_suffix>
#undef cv_hal_mul8u
#define cv_hal_mul8u(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::u8, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales)
#undef cv_hal_mul8s
#define cv_hal_mul8s(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::s8, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales)
#undef cv_hal_mul16u
#define cv_hal_mul16u(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::u16, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales)
#undef cv_hal_mul16s
#define cv_hal_mul16s(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::s16, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales)
<fim_middle>TegraUnaryOpScale_Invoker(recipf, reciprocal, 1, scale)<fim_end>
|
RANGE_DATA(ST, src1_data, src1_step), src1_step, \
RANGE_DATA(DT, dst1_data, dst1_step), dst1_step, __VA_ARGS__)
#define TEGRA_UNARYOPSCALE(type, op, src1, sz1, dst, sz, w, h, scales) \
( \
CAROTENE_NS::isSupportedConfiguration() ? \
parallel_for_(cv::Range(0, h), \
TegraGenOp_##op##_Invoker<const type, type>(src1, sz1, dst, sz, w, h, scales), \
(w * h) / static_cast<double>(1<<16)), \
CV_HAL_ERROR_OK \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
TegraUnaryOpScale_Invoker(recip, reciprocal, 1, scale, CAROTENE_NS::CONVERT_POLICY_SATURATE)
|
TegraUnaryOpScale_Invoker(recipf, reciprocal, 1, scale)
|
#undef cv_hal_mul8u
#define cv_hal_mul8u(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::u8, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales)
#undef cv_hal_mul8s
#define cv_hal_mul8s(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::s8, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales)
#undef cv_hal_mul16u
#define cv_hal_mul16u(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::u16, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales)
#undef cv_hal_mul16s
#define cv_hal_mul16s(src1, sz1, src2, sz2, dst, sz, w, h, scales) TEGRA_BINARYOPSCALE(CAROTENE_NS::s16, mul, src1, sz1, src2, sz2, dst, sz, w, h, scales)
|
ast_based
|
<fim_prefix>EPLICATE;
break;
case CV_HAL_BORDER_REFLECT:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT;
break;
case CV_HAL_BORDER_WRAP:
ctx->border = CAROTENE_NS::BORDER_MODE_WRAP;
break;
case CV_HAL_BORDER_REFLECT_101:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT101;
break;
default:
delete ctx;
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
if(!CAROTENE_NS::isSeparableFilter3x3Supported(CAROTENE_NS::Size2D(16, 16), ctx->border, 3, 3))
{
delete ctx;
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
switch(kernel_type)
{
case CV_8UC1:
ctx->kernelx_data[0]=kernelx_data[0];
<fim_suffix>;
ctx->kernelx_data[2]=kernelx_data[2];
ctx->kernely_data[0]=kernely_data[0];
ctx->kernely_data[1]=kernely_data[1];
ctx->kernely_data[2]=kernely_data[2];
break;
case CV_8SC1:
ctx->kernelx_data[0]=((char*)kernelx_data)[0];
ctx->kernelx_data[1]=((char*)kernelx_data)[1];
ctx->kernelx_data[2]=((char*)kernelx_data)[2];
ctx->kernely_data[0]=((char*)kernely_data)[0];
ctx->kernely_data[1]=((char*)kernely_data)[1];
ctx->kernely_data[2]=((char*)kernely_data)[2];
break;
case CV_16UC1:
ctx->kernelx_data[0]=((int16_t*)kernelx_data)[0];
ctx->kernelx_data[1]=((int16_t*)kernelx_data)[1];
<fim_middle>ctx->kernelx_data[1]=kernelx_data[1]<fim_end>
|
EPLICATE;
break;
case CV_HAL_BORDER_REFLECT:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT;
break;
case CV_HAL_BORDER_WRAP:
ctx->border = CAROTENE_NS::BORDER_MODE_WRAP;
break;
case CV_HAL_BORDER_REFLECT_101:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT101;
break;
default:
delete ctx;
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
if(!CAROTENE_NS::isSeparableFilter3x3Supported(CAROTENE_NS::Size2D(16, 16), ctx->border, 3, 3))
{
delete ctx;
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
switch(kernel_type)
{
case CV_8UC1:
ctx->kernelx_data[0]=kernelx_data[0];
|
ctx->kernelx_data[1]=kernelx_data[1]
|
;
ctx->kernelx_data[2]=kernelx_data[2];
ctx->kernely_data[0]=kernely_data[0];
ctx->kernely_data[1]=kernely_data[1];
ctx->kernely_data[2]=kernely_data[2];
break;
case CV_8SC1:
ctx->kernelx_data[0]=((char*)kernelx_data)[0];
ctx->kernelx_data[1]=((char*)kernelx_data)[1];
ctx->kernelx_data[2]=((char*)kernelx_data)[2];
ctx->kernely_data[0]=((char*)kernely_data)[0];
ctx->kernely_data[1]=((char*)kernely_data)[1];
ctx->kernely_data[2]=((char*)kernely_data)[2];
break;
case CV_16UC1:
ctx->kernelx_data[0]=((int16_t*)kernelx_data)[0];
ctx->kernelx_data[1]=((int16_t*)kernelx_data)[1];
|
ast_based
|
<fim_prefix> -> RefType { return values_.Get(id); }
// Returns a new block formed by applying `transform(elem_id)` to each element
// in the specified block.
template <typename TransformFnT>
auto Transform(IdT id, TransformFnT transform) -> IdT {
llvm::SmallVector<ElementType> block(llvm::map_range(Get(id), transform));
return Add(block);
}
// Adds a block or finds an existing canonical block with the given content,
// and returns an ID to reference it.
auto AddCanonical(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
auto result = canonical_blocks_.Insert(
content, [&] { return Add(content); }, KeyContext(this));
return <fim_suffix>;
}
// Promotes an existing block ID to a canonical block ID, or returns an
// existing canonical block ID if the block was already added. The specified
// block must not be modified after this point.
auto MakeCanonical(IdT id) -> IdT {
// Get the content first so that we don't have unnecessary translation of
// the `id` into the content during insertion.
auto result = canonical_blocks_.Insert(
Get(id), [id] { return id; }, KeyContext(this));
return result.key();
}
auto OutputYaml() const -> Yaml::OutputMapping {
return Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
for (auto [block_id, block] : values_.enumerate()) {
map.Add<fim_middle>result.key()<fim_end>
|
-> RefType { return values_.Get(id); }
// Returns a new block formed by applying `transform(elem_id)` to each element
// in the specified block.
template <typename TransformFnT>
auto Transform(IdT id, TransformFnT transform) -> IdT {
llvm::SmallVector<ElementType> block(llvm::map_range(Get(id), transform));
return Add(block);
}
// Adds a block or finds an existing canonical block with the given content,
// and returns an ID to reference it.
auto AddCanonical(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
auto result = canonical_blocks_.Insert(
content, [&] { return Add(content); }, KeyContext(this));
return
|
result.key()
|
;
}
// Promotes an existing block ID to a canonical block ID, or returns an
// existing canonical block ID if the block was already added. The specified
// block must not be modified after this point.
auto MakeCanonical(IdT id) -> IdT {
// Get the content first so that we don't have unnecessary translation of
// the `id` into the content during insertion.
auto result = canonical_blocks_.Insert(
Get(id), [id] { return id; }, KeyContext(this));
return result.key();
}
auto OutputYaml() const -> Yaml::OutputMapping {
return Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
for (auto [block_id, block] : values_.enumerate()) {
map.Add
|
ast_based
|
<fim_prefix> size = size * nmemb;
auto *buf = reinterpret_cast<std::string *>(userp);
buf->append(reinterpret_cast<const char *>(contents), size);
return size;
}
#endif
// In the ideal scenario, Tesseract will start working on data as soon
// as it can. For example, if you stream a filelist through stdin, we
// should start the OCR process as soon as the first filename is
// available. This is particularly useful when hooking Tesseract up to
// slow hardware such as a book scanning machine.
//
// Unfortunately there are tradeoffs. You can't seek on stdin. That
// makes automatic detection of datatype (TIFF? filelist? PNG?)<fim_suffix>// stdin. We'll still do our best if the user likes pipes.
bool TessBaseAPI::ProcessPagesInternal(const char *filename, const char *retry_config,
int timeout_millisec, TessResultRenderer *renderer) {
bool stdInput = !strcmp(filename, "stdin") || !strcmp(filename, "-");
if (stdInput) {
#ifdef WIN32
if (_setmode(_fileno(stdin), _O_BINARY) == -1)
tprintf("ERROR: cin to binary: %s", strerror(errno));
#endif // WIN32
}
if (stream_filelist) {
return ProcessPagesFileList(stdin, nullptr, retry_config, timeout_millisec, renderer,
tesseract_->tessedit_page_number);
}
<fim_middle>// impractical. So we support a command line flag to explicitly
// identify the scenario that really matters: filelists on<fim_end>
|
size = size * nmemb;
auto *buf = reinterpret_cast<std::string *>(userp);
buf->append(reinterpret_cast<const char *>(contents), size);
return size;
}
#endif
// In the ideal scenario, Tesseract will start working on data as soon
// as it can. For example, if you stream a filelist through stdin, we
// should start the OCR process as soon as the first filename is
// available. This is particularly useful when hooking Tesseract up to
// slow hardware such as a book scanning machine.
//
// Unfortunately there are tradeoffs. You can't seek on stdin. That
// makes automatic detection of datatype (TIFF? filelist? PNG?)
|
// impractical. So we support a command line flag to explicitly
// identify the scenario that really matters: filelists on
|
// stdin. We'll still do our best if the user likes pipes.
bool TessBaseAPI::ProcessPagesInternal(const char *filename, const char *retry_config,
int timeout_millisec, TessResultRenderer *renderer) {
bool stdInput = !strcmp(filename, "stdin") || !strcmp(filename, "-");
if (stdInput) {
#ifdef WIN32
if (_setmode(_fileno(stdin), _O_BINARY) == -1)
tprintf("ERROR: cin to binary: %s", strerror(errno));
#endif // WIN32
}
if (stream_filelist) {
return ProcessPagesFileList(stdin, nullptr, retry_config, timeout_millisec, renderer,
tesseract_->tessedit_page_number);
}
|
random
|
<fim_prefix>(curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
}
curlcode = curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buf);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_USERAGENT, "Tesseract OCR");
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_perform(curl);
if (curlcode != CURLE_OK) {
return error("curl_easy_perform");
}
<fim_suffix>;
data = reinterpret_cast<const l_uint8 *>(buf.data());
}
#else
fprintf(stderr, "Error, this tesseract has no URL support\n");
return false;
#endif
} else {
// Check whether the input file can be read.
if (FILE *file = fopen(filename, "rb")) {
fclose(file);
} else {
fprintf(stderr, "Error, cannot read input file %s: %s\n", filename, strerror(errno));
return false;
}
}
// Here is our autodetection
int format;
int r =
(data != nullptr) ? findFileFormatBuffer(data, &format) : findFileFormat(filename, &format);
// Maybe we have a filelist
if (r != 0 || format == IFF_UNKNOWN) {
std::string s;
if (data != nullptr) {
<fim_middle>curl_easy_cleanup(curl)<fim_end>
|
(curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
}
curlcode = curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buf);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_USERAGENT, "Tesseract OCR");
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_perform(curl);
if (curlcode != CURLE_OK) {
return error("curl_easy_perform");
}
|
curl_easy_cleanup(curl)
|
;
data = reinterpret_cast<const l_uint8 *>(buf.data());
}
#else
fprintf(stderr, "Error, this tesseract has no URL support\n");
return false;
#endif
} else {
// Check whether the input file can be read.
if (FILE *file = fopen(filename, "rb")) {
fclose(file);
} else {
fprintf(stderr, "Error, cannot read input file %s: %s\n", filename, strerror(errno));
return false;
}
}
// Here is our autodetection
int format;
int r =
(data != nullptr) ? findFileFormatBuffer(data, &format) : findFileFormat(filename, &format);
// Maybe we have a filelist
if (r != 0 || format == IFF_UNKNOWN) {
std::string s;
if (data != nullptr) {
|
ast_based
|
<fim_prefix>}
void Engine::get_singletons(List<Singleton> *p_singletons) {
for (const Singleton &E : singletons) {
#ifdef TOOLS_ENABLED
if (!is_editor_hint() && E.editor_only) {
continue;
}
#endif
p_singletons->push_back(E);
}
}
String Engine::get_write_movie_path() const {
return write_movie_path;
}
void Engine::set_write_movie_path(const String &p_path) {
write_movie_path = p_path;
}
void Engine::set_shader_cache_path(const String &p_path) {
shader_cache_path = p_path;
}
String Engine::get_shader_cache_path() const {
return shader_cache_path;
}
Engine *Engine::get_singleton() {
return singleton;
}
bool Engine::notify_frame_server_synced() {
frame_server_synced = true;<fim_suffix> freeze_time_scale = p_frozen;
}
void Engine::set_embedded_in_editor(bool p_enabled) {
embedded_in_editor = p_enabled;
}
bool Engine::is_embedded_in_editor() const {
return embedded_in_editor;
}
Engine::Engine() {
singleton = this;
}
Engine::~Engine() {
if (singleton == this) {
singleton = nullptr;
}
}
Engine::Singleton::Singleton(const StringName &p_name, Object *p_ptr, const StringName &p_class_name) :
name(p_name),
ptr(p_ptr),
class_name(p_class_name) {
#ifdef DEBUG_ENABLED
RefCounted *rc = Object::cast_to<RefCounted>(p_ptr);
if (rc && !rc->is_referenced()) {<fim_middle> return server_syncs > SERVER_SYNC_FRAME_COUNT_WARNING;
}
void Engine::set_freeze_time_scale(bool p_frozen) {<fim_end>
|
}
void Engine::get_singletons(List<Singleton> *p_singletons) {
for (const Singleton &E : singletons) {
#ifdef TOOLS_ENABLED
if (!is_editor_hint() && E.editor_only) {
continue;
}
#endif
p_singletons->push_back(E);
}
}
String Engine::get_write_movie_path() const {
return write_movie_path;
}
void Engine::set_write_movie_path(const String &p_path) {
write_movie_path = p_path;
}
void Engine::set_shader_cache_path(const String &p_path) {
shader_cache_path = p_path;
}
String Engine::get_shader_cache_path() const {
return shader_cache_path;
}
Engine *Engine::get_singleton() {
return singleton;
}
bool Engine::notify_frame_server_synced() {
frame_server_synced = true;
|
return server_syncs > SERVER_SYNC_FRAME_COUNT_WARNING;
}
void Engine::set_freeze_time_scale(bool p_frozen) {
|
freeze_time_scale = p_frozen;
}
void Engine::set_embedded_in_editor(bool p_enabled) {
embedded_in_editor = p_enabled;
}
bool Engine::is_embedded_in_editor() const {
return embedded_in_editor;
}
Engine::Engine() {
singleton = this;
}
Engine::~Engine() {
if (singleton == this) {
singleton = nullptr;
}
}
Engine::Singleton::Singleton(const StringName &p_name, Object *p_ptr, const StringName &p_class_name) :
name(p_name),
ptr(p_ptr),
class_name(p_class_name) {
#ifdef DEBUG_ENABLED
RefCounted *rc = Object::cast_to<RefCounted>(p_ptr);
if (rc && !rc->is_referenced()) {
|
random
|
<fim_prefix>ressed() && !mb->is_shift_pressed() && !mb->is_command_or_control_pressed();
if (mb->is_double_click() && !moving_selection && no_mod_key_pressed) {
int x = pos.x - timeline->get_name_limit();
float ofs = x / timeline->get_zoom_scale() + timeline->get_value();
emit_signal(SNAME("timeline_changed"), ofs, false);
}
for (const KeyValue<int, Rect2> &E : subtracks) {
if (E.value.has_point(mb->get_position())) {
if (!locked_tracks.has(E.key) && !hidden_tracks.has(E.key)) {
set_animation_and_track(animation, E.key, read_only);
_clear_selection();
}
return;
}
}
for (const KeyValue<int, RBMap<int, Rect2>> &E : subtrack_icons) {
int track = E.key;
<fim_suffix>
for (const KeyValue<int, Rect2> &I : track_icons) {
if (I.value.has_point(mb->get_position())) {
if (I.key == REMOVE_ICON) {
if (!read_only) {
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action("Remove Bezier Track", UndoRedo::MERGE_DISABLE, animation.ptr());
undo_redo->add_do_method(this, "_update_locked_tracks_after", track);
undo_redo->add_do_method(this, "_update_hidden_tracks_after", track);
undo_redo->add_do_method(animation.ptr(), "remove_track", track);
undo_redo->add_undo_method(animation.ptr(), "add_track", Animation::TrackType::TYPE_BEZIER, track);
undo_redo->add<fim_middle>RBMap<int, Rect2> track_icons = E.value;<fim_end>
|
ressed() && !mb->is_shift_pressed() && !mb->is_command_or_control_pressed();
if (mb->is_double_click() && !moving_selection && no_mod_key_pressed) {
int x = pos.x - timeline->get_name_limit();
float ofs = x / timeline->get_zoom_scale() + timeline->get_value();
emit_signal(SNAME("timeline_changed"), ofs, false);
}
for (const KeyValue<int, Rect2> &E : subtracks) {
if (E.value.has_point(mb->get_position())) {
if (!locked_tracks.has(E.key) && !hidden_tracks.has(E.key)) {
set_animation_and_track(animation, E.key, read_only);
_clear_selection();
}
return;
}
}
for (const KeyValue<int, RBMap<int, Rect2>> &E : subtrack_icons) {
int track = E.key;
|
RBMap<int, Rect2> track_icons = E.value;
|
for (const KeyValue<int, Rect2> &I : track_icons) {
if (I.value.has_point(mb->get_position())) {
if (I.key == REMOVE_ICON) {
if (!read_only) {
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action("Remove Bezier Track", UndoRedo::MERGE_DISABLE, animation.ptr());
undo_redo->add_do_method(this, "_update_locked_tracks_after", track);
undo_redo->add_do_method(this, "_update_hidden_tracks_after", track);
undo_redo->add_do_method(animation.ptr(), "remove_track", track);
undo_redo->add_undo_method(animation.ptr(), "add_track", Animation::TrackType::TYPE_BEZIER, track);
undo_redo->add
|
ast_based
|
<fim_prefix>( \
depth == CV_8U && CAROTENE_NS::isSupportedConfiguration() ? \
scn == 3 ? \
(swapBlue ? \
parallel_for_(cv::Range(0, height), \
TegraCvtColor_rgb2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \
(width * height) / static_cast<double>(1<<16)) : \
parallel_for_(cv::Range(0, height), \
TegraCvtColor_bgr2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \
(width * height) / static_cast<double>(1<<16)) ), \
CV_HAL_ERROR_OK : \<fim_suffix> (swapBlue ? \
parallel_for_(cv::Range(0, height), \
TegraCvtColor_rgbx2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \
(width * height) / static_cast<double>(1<<16)) : \
parallel_for_(cv::Range(0, height), \
TegraCvtColor_bgrx2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \
(width * height) / static_cast<double>(1<<16)) ), \
CV_HAL_ERROR_OK : \
CV_HAL_ERROR_NOT_IMPLEMENTED \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
<fim_middle> scn == 4 ? \<fim_end>
|
( \
depth == CV_8U && CAROTENE_NS::isSupportedConfiguration() ? \
scn == 3 ? \
(swapBlue ? \
parallel_for_(cv::Range(0, height), \
TegraCvtColor_rgb2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \
(width * height) / static_cast<double>(1<<16)) : \
parallel_for_(cv::Range(0, height), \
TegraCvtColor_bgr2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \
(width * height) / static_cast<double>(1<<16)) ), \
CV_HAL_ERROR_OK : \
|
scn == 4 ? \
|
(swapBlue ? \
parallel_for_(cv::Range(0, height), \
TegraCvtColor_rgbx2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \
(width * height) / static_cast<double>(1<<16)) : \
parallel_for_(cv::Range(0, height), \
TegraCvtColor_bgrx2gray_Invoker(src_data, src_step, dst_data, dst_step, width, height), \
(width * height) / static_cast<double>(1<<16)) ), \
CV_HAL_ERROR_OK : \
CV_HAL_ERROR_NOT_IMPLEMENTED \
: CV_HAL_ERROR_NOT_IMPLEMENTED \
)
|
random
|
<fim_prefix>get_relative());
mouse_event_info.pos = p_event_pos;
hover_prev_pos = p_event_pos;
}
ev->set_button_mask(event_buttons_mask);
ev->set_pressure(p_pressure);
ev->set_tilt(p_tilt);
Input::get_singleton()->parse_input_event(ev);
} break;
case AMOTION_EVENT_ACTION_SCROLL: {
Ref<InputEventMouseButton> ev;
ev.instantiate();
_set_key_modifier_state(ev, Key::NONE);
if (p_source_mouse_relative) {
ev->set_position(hover_prev_pos);
ev->set_global_position(hover_prev_pos);
} else {
ev->set_position(p_event_pos);
ev->set_global_position(p_event_pos);
}
ev->set_pressed(true);
buttons_state = event_buttons_mask;
if (p_delta.y > 0) {
<fim_suffix>;
} else if (p_delta.y < 0) {
_wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_DOWN, -p_delta.y);
}
if (p_delta.x > 0) {
_wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_RIGHT, p_delta.x);
} else if (p_delta.x < 0) {
_wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_LEFT, -p_delta.x);
}
} break;
}
}
void AndroidInputHandler::_wheel_button_click(BitField<MouseButtonMask> event_buttons_mask, const Ref<InputEventMouseButton> &ev, MouseButton wheel_button, float factor) {
Ref<InputEventMouseButton> evd = ev->duplicate();
_set_key_modifier_state(evd, Key::NONE);
evd->set_button_index(wheel_button);
evd->set_button_mask(e<fim_middle>_wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_UP, p_delta.y)<fim_end>
|
get_relative());
mouse_event_info.pos = p_event_pos;
hover_prev_pos = p_event_pos;
}
ev->set_button_mask(event_buttons_mask);
ev->set_pressure(p_pressure);
ev->set_tilt(p_tilt);
Input::get_singleton()->parse_input_event(ev);
} break;
case AMOTION_EVENT_ACTION_SCROLL: {
Ref<InputEventMouseButton> ev;
ev.instantiate();
_set_key_modifier_state(ev, Key::NONE);
if (p_source_mouse_relative) {
ev->set_position(hover_prev_pos);
ev->set_global_position(hover_prev_pos);
} else {
ev->set_position(p_event_pos);
ev->set_global_position(p_event_pos);
}
ev->set_pressed(true);
buttons_state = event_buttons_mask;
if (p_delta.y > 0) {
|
_wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_UP, p_delta.y)
|
;
} else if (p_delta.y < 0) {
_wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_DOWN, -p_delta.y);
}
if (p_delta.x > 0) {
_wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_RIGHT, p_delta.x);
} else if (p_delta.x < 0) {
_wheel_button_click(event_buttons_mask, ev, MouseButton::WHEEL_LEFT, -p_delta.x);
}
} break;
}
}
void AndroidInputHandler::_wheel_button_click(BitField<MouseButtonMask> event_buttons_mask, const Ref<InputEventMouseButton> &ev, MouseButton wheel_button, float factor) {
Ref<InputEventMouseButton> evd = ev->duplicate();
_set_key_modifier_state(evd, Key::NONE);
evd->set_button_index(wheel_button);
evd->set_button_mask(e
|
ast_based
|
<fim_prefix>ng(rect_width_);
tsv_str += "\t" + std::to_string(rect_height_);
tsv_str += "\t-1\t\n";
const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
}
// Add rows for any new block/paragraph/textline.
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
block_num++;
par_num = 0;
line_num = 0;
word_num = 0;
tsv_str += "2\t" + std::to_string(page_num); // level 2 - block
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
<fim_suffix>;
AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for block
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
par_num++;
line_num = 0;
word_num = 0;
tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for para
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
line_num++;
word_num = 0;
tsv_str<fim_middle>tsv_str += "\t" + std::to_string(word_num)<fim_end>
|
ng(rect_width_);
tsv_str += "\t" + std::to_string(rect_height_);
tsv_str += "\t-1\t\n";
const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
}
// Add rows for any new block/paragraph/textline.
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
block_num++;
par_num = 0;
line_num = 0;
word_num = 0;
tsv_str += "2\t" + std::to_string(page_num); // level 2 - block
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
|
tsv_str += "\t" + std::to_string(word_num)
|
;
AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for block
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
par_num++;
line_num = 0;
word_num = 0;
tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for para
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
line_num++;
word_num = 0;
tsv_str
|
ast_based
|
<fim_prefix> if (p_ofs_valid) {
if (editor->snap_keys->is_pressed() && editor->step->get_value() != 0) {
insert_pos = editor->snap_time(insert_pos);
}
}
float dst_time = key.time + insert_pos;
int existing_idx = animation->track_find_key(selected_track, dst_time, Animation::FIND_MODE_APPROX);
Variant value = key.value;
if (key.track_type != Animation::TYPE_BEZIER) {
value = animation->make_default_bezier_key(key.value);
}
undo_redo->add_do_method(animation.ptr(), "track_insert_key", selected_track, dst_time, value, key.transition);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", selected_track, dst_time);
<fim_suffix>
if (existing_idx != -1) {
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", selected_track, dst_time, animation->track_get_key_value(selected_track, existing_idx), animation->track_get_key_transition(selected_track, existing_idx));
}
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect pasted.
int i = 0;
for (const Pair<int, float> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {<fim_middle> Pair<int, float> p;
p.first = selected_track;
p.second = dst_time;
new_selection_values.push_back(p);<fim_end>
|
if (p_ofs_valid) {
if (editor->snap_keys->is_pressed() && editor->step->get_value() != 0) {
insert_pos = editor->snap_time(insert_pos);
}
}
float dst_time = key.time + insert_pos;
int existing_idx = animation->track_find_key(selected_track, dst_time, Animation::FIND_MODE_APPROX);
Variant value = key.value;
if (key.track_type != Animation::TYPE_BEZIER) {
value = animation->make_default_bezier_key(key.value);
}
undo_redo->add_do_method(animation.ptr(), "track_insert_key", selected_track, dst_time, value, key.transition);
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", selected_track, dst_time);
|
Pair<int, float> p;
p.first = selected_track;
p.second = dst_time;
new_selection_values.push_back(p);
|
if (existing_idx != -1) {
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", selected_track, dst_time, animation->track_get_key_value(selected_track, existing_idx), animation->track_get_key_transition(selected_track, existing_idx));
}
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect pasted.
int i = 0;
for (const Pair<int, float> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
|
random
|
<fim_prefix> LOG_INF("%s: n_ctx: %u\n", __func__, params->n_ctx);
LOG_INF("%s: n_embd: %u\n", __func__, params->n_embd);
LOG_INF("%s: n_mult: %u\n", __func__, params->n_mult);
LOG_INF("%s: n_head: %u\n", __func__, params->n_head);
LOG_INF("%s: n_head_kv: %u\n", __func__, params->n_head_kv);
LOG_INF("%s: n_ff: %u\n", __func__, params->n_ff);
LOG_INF("%s: n_layer: %u\n", __func__, params->n_layer);
LOG_INF("%s: n_rot: %u\n", __func__, params->n_rot);
}
static void print_tensor_info(const struct ggml_context * ctx) {
for (auto * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {<fim_suffix> if (i > 0) { LOG_INF("x "); }
LOG_INF("[%" PRId64 "] ", t->ne[i]);
total *= t->ne[i];
}
if (i > 1) { LOG_INF("= [%" PRId64 "] ", total); }
LOG_INF("float space for %s\n", ggml_get_name(t));
}
}
static void init_model(struct my_llama_model * model) {
const auto & hparams = model->hparams;
const uint32_t n_embd = hparams.n_embd;
const uint32_t n_layer = hparams.n_layer;
const uint32_t n_vocab = hparams.n_vocab;
const uint32_t n_multiqueries = hparams.n_head_kv <= 0 || hparams.n_head_kv >= hparams.n_head ? 1 : hparams.n_head / hparams.n_head_kv;
const uint32_t n_ff = hparams.n_ff;<fim_middle> LOG_INF("%s: Allocating ", __func__);
int64_t total = 1;
int i = 0;
for (; i < ggml_n_dims(t); ++i) {<fim_end>
|
LOG_INF("%s: n_ctx: %u\n", __func__, params->n_ctx);
LOG_INF("%s: n_embd: %u\n", __func__, params->n_embd);
LOG_INF("%s: n_mult: %u\n", __func__, params->n_mult);
LOG_INF("%s: n_head: %u\n", __func__, params->n_head);
LOG_INF("%s: n_head_kv: %u\n", __func__, params->n_head_kv);
LOG_INF("%s: n_ff: %u\n", __func__, params->n_ff);
LOG_INF("%s: n_layer: %u\n", __func__, params->n_layer);
LOG_INF("%s: n_rot: %u\n", __func__, params->n_rot);
}
static void print_tensor_info(const struct ggml_context * ctx) {
for (auto * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
|
LOG_INF("%s: Allocating ", __func__);
int64_t total = 1;
int i = 0;
for (; i < ggml_n_dims(t); ++i) {
|
if (i > 0) { LOG_INF("x "); }
LOG_INF("[%" PRId64 "] ", t->ne[i]);
total *= t->ne[i];
}
if (i > 1) { LOG_INF("= [%" PRId64 "] ", total); }
LOG_INF("float space for %s\n", ggml_get_name(t));
}
}
static void init_model(struct my_llama_model * model) {
const auto & hparams = model->hparams;
const uint32_t n_embd = hparams.n_embd;
const uint32_t n_layer = hparams.n_layer;
const uint32_t n_vocab = hparams.n_vocab;
const uint32_t n_multiqueries = hparams.n_head_kv <= 0 || hparams.n_head_kv >= hparams.n_head ? 1 : hparams.n_head / hparams.n_head_kv;
const uint32_t n_ff = hparams.n_ff;
|
random
|
<fim_prefix>ntSession(scope, "") {}
ClientSession::ClientSession(const Scope& scope,
const SessionOptions& session_options) {
Session* new_session;
absl::Status status = NewSession(session_options, &new_session);
TF_CHECK_OK(status) << status;
impl_.reset(new Impl(new_session, scope.graph_as_shared_ptr()));
CHECK_NOTNULL(impl()->session_.get());
}
// Define destructor here so we can forward declare `Impl` in client_session.h.
// If we define a dtor in the header file or use the default dtor,
// unique_ptr<Impl> needs the complete type.
ClientSession::~ClientSession() {}
SessionOptions ClientSession::Impl::MakeDefaultSessionOptions(
const string& target) {
<fim_suffix>
return options;
}
absl::Status ClientSession::Run(const std::vector<Output>& fetch_outputs,
std::vector<Tensor>* outputs) const {
return Run(FeedType{}, fetch_outputs, {}, outputs);
}
absl::Status ClientSession::Run(const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
std::vector<Tensor>* outputs) const {
return Run(inputs, fetch_outputs, {}, outputs);
}
absl::Status ClientSession::Run(const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
<fim_middle>SessionOptions options;
options.env = Env::Default();
options.target = target;<fim_end>
|
ntSession(scope, "") {}
ClientSession::ClientSession(const Scope& scope,
const SessionOptions& session_options) {
Session* new_session;
absl::Status status = NewSession(session_options, &new_session);
TF_CHECK_OK(status) << status;
impl_.reset(new Impl(new_session, scope.graph_as_shared_ptr()));
CHECK_NOTNULL(impl()->session_.get());
}
// Define destructor here so we can forward declare `Impl` in client_session.h.
// If we define a dtor in the header file or use the default dtor,
// unique_ptr<Impl> needs the complete type.
ClientSession::~ClientSession() {}
SessionOptions ClientSession::Impl::MakeDefaultSessionOptions(
const string& target) {
|
SessionOptions options;
options.env = Env::Default();
options.target = target;
|
return options;
}
absl::Status ClientSession::Run(const std::vector<Output>& fetch_outputs,
std::vector<Tensor>* outputs) const {
return Run(FeedType{}, fetch_outputs, {}, outputs);
}
absl::Status ClientSession::Run(const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
std::vector<Tensor>* outputs) const {
return Run(inputs, fetch_outputs, {}, outputs);
}
absl::Status ClientSession::Run(const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
|
ast_based
|
<fim_prefix>#pragma once
#include <filesystem>
#include <stdexcept>
#include <string>
#include <utility>
namespace fs = std::filesystem;
class Dlhandle {
void *chandle = nullptr;
public:
class Exception : public std::runtime_error {
public:
using std::runtime_error::runtime_error;
};
Dlhandle() = default;
Dlhandle(const fs::path &fpath);<fim_suffix> Dlhandle(Dlhandle &&o)
: chandle(o.chandle)
{
o.chandle = nullptr;
}
~Dlhandle();
Dlhandle &operator=(Dlhandle &&o) {
chandle = std::exchange(o.chandle, nullptr);
return *this;
}
template <typename T>
T *get(const std::string &symbol) const {
return reinterpret_cast<T *>(get_internal(symbol.c_str()));
}
auto get_fnc(const std::string &symbol) const {
return get<void*(...)>(symbol);
}
private:
void *get_internal(const char *symbol) const;
};
<fim_middle> Dlhandle(const Dlhandle &o) = delete;<fim_end>
|
#pragma once
#include <filesystem>
#include <stdexcept>
#include <string>
#include <utility>
namespace fs = std::filesystem;
class Dlhandle {
void *chandle = nullptr;
public:
class Exception : public std::runtime_error {
public:
using std::runtime_error::runtime_error;
};
Dlhandle() = default;
Dlhandle(const fs::path &fpath);
|
Dlhandle(const Dlhandle &o) = delete;
|
Dlhandle(Dlhandle &&o)
: chandle(o.chandle)
{
o.chandle = nullptr;
}
~Dlhandle();
Dlhandle &operator=(Dlhandle &&o) {
chandle = std::exchange(o.chandle, nullptr);
return *this;
}
template <typename T>
T *get(const std::string &symbol) const {
return reinterpret_cast<T *>(get_internal(symbol.c_str()));
}
auto get_fnc(const std::string &symbol) const {
return get<void*(...)>(symbol);
}
private:
void *get_internal(const char *symbol) const;
};
|
random
|
<fim_prefix>n, frame);
}
if (t.is_null()) {
return Rect2();
}
Size2 s = t->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (s == Size2(0, 0)) {
s = Size2(1, 1);
}
return Rect2(ofs, s);
}
void AnimatedSprite2D::_validate_property(PropertyInfo &p_property) const {
if (frames.is_null()) {
return;
}
if (!Engine::get_singleton()->is_editor_hint()) {
if (p_property.name == "frame" && playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
}
return;
}
if (p_property.name == "animation") {
List<StringName> names;
frames->get_animation_list(&names);
names.sort_custom<StringName::AlphCompare>();
bool current_found = false;
<fim_suffix>
for (const StringName &E : names) {
if (!is_first_element) {
p_property.hint_string += ",";
} else {
is_first_element = false;
}
p_property.hint_string += String(E);
if (animation == E) {
current_found = true;
}
}
if (!current_found) {
if (p_property.hint_string.is_empty()) {
p_property.hint_string = String(animation);
} else {
p_property.hint_string = String(animation) + "," + p_property.hint_string;
}
}
return;
}
if (p_property.name == "frame") {
if (playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
return;
}
p_property.hint = PROPERTY_HINT_RANGE;
if (frames->has_animation(animation<fim_middle>bool is_first_element = true;<fim_end>
|
n, frame);
}
if (t.is_null()) {
return Rect2();
}
Size2 s = t->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (s == Size2(0, 0)) {
s = Size2(1, 1);
}
return Rect2(ofs, s);
}
void AnimatedSprite2D::_validate_property(PropertyInfo &p_property) const {
if (frames.is_null()) {
return;
}
if (!Engine::get_singleton()->is_editor_hint()) {
if (p_property.name == "frame" && playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
}
return;
}
if (p_property.name == "animation") {
List<StringName> names;
frames->get_animation_list(&names);
names.sort_custom<StringName::AlphCompare>();
bool current_found = false;
|
bool is_first_element = true;
|
for (const StringName &E : names) {
if (!is_first_element) {
p_property.hint_string += ",";
} else {
is_first_element = false;
}
p_property.hint_string += String(E);
if (animation == E) {
current_found = true;
}
}
if (!current_found) {
if (p_property.hint_string.is_empty()) {
p_property.hint_string = String(animation);
} else {
p_property.hint_string = String(animation) + "," + p_property.hint_string;
}
}
return;
}
if (p_property.name == "frame") {
if (playing) {
p_property.usage = PROPERTY_USAGE_EDITOR | PROPERTY_USAGE_READ_ONLY;
return;
}
p_property.hint = PROPERTY_HINT_RANGE;
if (frames->has_animation(animation
|
ast_based
|
<fim_prefix> Run(grpc_completion_queue_functor* cb, int) {
auto* callback = static_cast<ShutdownCallback*>(cb);
delete callback->cq_;
delete callback;
}
private:
grpc::CompletionQueue* cq_ = nullptr;
};
} // namespace
::grpc::CompletionQueue* Channel::CallbackCQ() {
// TODO(vjpai): Consider using a single global CQ for the default CQ
// if there is no explicit per-channel CQ registered
CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_acquire);
if (callback_cq != nullptr) {
return callback_cq;
}
// The callback_cq_ wasn't already set, so grab a lock and set it up exactly
// once for this channel.
grpc::internal::MutexLock l(&mu_);
callback_cq = <fim_suffix>;
if (callback_cq == nullptr) {
if (grpc_iomgr_run_in_background()) {
// gRPC-core provides the backing needed for the preferred CQ type
auto* shutdown_callback = new ShutdownCallback;
callback_cq = new grpc::CompletionQueue(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
shutdown_callback});
// Transfer ownership of the new cq to its own shutdown callback
shutdown_callback->TakeCQ(callback_cq);
} else {
// Otherwise we need to use the alternative CQ variant
callback_cq = CompletionQueue::CallbackAlternativeCQ();
}
callback_cq_.store(callback_cq, std::memory<fim_middle>callback_cq_.load(std::memory_order_relaxed)<fim_end>
|
Run(grpc_completion_queue_functor* cb, int) {
auto* callback = static_cast<ShutdownCallback*>(cb);
delete callback->cq_;
delete callback;
}
private:
grpc::CompletionQueue* cq_ = nullptr;
};
} // namespace
::grpc::CompletionQueue* Channel::CallbackCQ() {
// TODO(vjpai): Consider using a single global CQ for the default CQ
// if there is no explicit per-channel CQ registered
CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_acquire);
if (callback_cq != nullptr) {
return callback_cq;
}
// The callback_cq_ wasn't already set, so grab a lock and set it up exactly
// once for this channel.
grpc::internal::MutexLock l(&mu_);
callback_cq =
|
callback_cq_.load(std::memory_order_relaxed)
|
;
if (callback_cq == nullptr) {
if (grpc_iomgr_run_in_background()) {
// gRPC-core provides the backing needed for the preferred CQ type
auto* shutdown_callback = new ShutdownCallback;
callback_cq = new grpc::CompletionQueue(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
shutdown_callback});
// Transfer ownership of the new cq to its own shutdown callback
shutdown_callback->TakeCQ(callback_cq);
} else {
// Otherwise we need to use the alternative CQ variant
callback_cq = CompletionQueue::CallbackAlternativeCQ();
}
callback_cq_.store(callback_cq, std::memory
|
ast_based
|
<fim_prefix>// stdin. We'll still do our best if the user likes pipes.
bool TessBaseAPI::ProcessPagesInternal(const char *filename, const char *retry_config,
int timeout_millisec, TessResultRenderer *renderer) {
bool stdInput = !strcmp(filename, "stdin") || !strcmp(filename, "-");
if (stdInput) {
#ifdef WIN32
if (_setmode(_fileno(stdin), _O_BINARY) == -1)
tprintf("ERROR: cin to binary: %s", strerror(errno));
#endif // WIN32
}
if (stream_filelist) {
return ProcessPagesFileList(stdin, nullptr, retry_config, timeout_millisec, renderer,
tesseract_->tessedit_page_number);
}
<fim_suffix> // That means any data in stdin must be buffered, to make it
// seekable.
std::string buf;
const l_uint8 *data = nullptr;
if (stdInput) {
buf.assign((std::istreambuf_iterator<char>(std::cin)), (std::istreambuf_iterator<char>()));
data = reinterpret_cast<const l_uint8 *>(buf.data());
} else if (strstr(filename, "://") != nullptr) {
// Get image or image list by URL.
#ifdef HAVE_LIBCURL
CURL *curl = curl_easy_init();
if (curl == nullptr) {
fprintf(stderr, "Error, curl_easy_init failed\n");
return false;
} else {
CURLcode curlcode;
auto error = [curl, &curlcode](const char *function) {<fim_middle> // At this point we are officially in autodection territory.<fim_end>
|
// stdin. We'll still do our best if the user likes pipes.
bool TessBaseAPI::ProcessPagesInternal(const char *filename, const char *retry_config,
int timeout_millisec, TessResultRenderer *renderer) {
bool stdInput = !strcmp(filename, "stdin") || !strcmp(filename, "-");
if (stdInput) {
#ifdef WIN32
if (_setmode(_fileno(stdin), _O_BINARY) == -1)
tprintf("ERROR: cin to binary: %s", strerror(errno));
#endif // WIN32
}
if (stream_filelist) {
return ProcessPagesFileList(stdin, nullptr, retry_config, timeout_millisec, renderer,
tesseract_->tessedit_page_number);
}
|
// At this point we are officially in autodection territory.
|
// That means any data in stdin must be buffered, to make it
// seekable.
std::string buf;
const l_uint8 *data = nullptr;
if (stdInput) {
buf.assign((std::istreambuf_iterator<char>(std::cin)), (std::istreambuf_iterator<char>()));
data = reinterpret_cast<const l_uint8 *>(buf.data());
} else if (strstr(filename, "://") != nullptr) {
// Get image or image list by URL.
#ifdef HAVE_LIBCURL
CURL *curl = curl_easy_init();
if (curl == nullptr) {
fprintf(stderr, "Error, curl_easy_init failed\n");
return false;
} else {
CURLcode curlcode;
auto error = [curl, &curlcode](const char *function) {
|
random
|
<fim_prefix>/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "engine.h"
#include "core/authors.gen.h"
#include "core/config/project_settings.h"
#include "core/donors.gen.h"
#include "core/license.gen.h"
#include "core/variant/typed_array.h"<fim_suffix>#include "servers/rendering/rendering_device.h"
void Engine::set_physics_ticks_per_second(int p_ips) {
ERR_FAIL_COND_MSG(p_ips <= 0, "Engine iterations per second must be greater than 0.");
ips = p_ips;
}
int Engine::get_physics_ticks_per_second() const {
return ips;
}
void Engine::set_max_physics_steps_per_frame(int p_max_physics_steps) {
ERR_FAIL_COND_MSG(p_max_physics_steps <= 0, "Maximum number of physics steps per frame must be greater than 0.");
max_physics_steps_per_frame = p_max_physics_steps;
}
int Engine::get_max_physics_steps_per_frame() const {
return max_physics_steps_per_frame;
}
void Engine::set_physics_jitter_fix(double p_threshold) {
if (p_threshold < 0) {<fim_middle>#include "core/version.h"<fim_end>
|
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "engine.h"
#include "core/authors.gen.h"
#include "core/config/project_settings.h"
#include "core/donors.gen.h"
#include "core/license.gen.h"
#include "core/variant/typed_array.h"
|
#include "core/version.h"
|
#include "servers/rendering/rendering_device.h"
void Engine::set_physics_ticks_per_second(int p_ips) {
ERR_FAIL_COND_MSG(p_ips <= 0, "Engine iterations per second must be greater than 0.");
ips = p_ips;
}
int Engine::get_physics_ticks_per_second() const {
return ips;
}
void Engine::set_max_physics_steps_per_frame(int p_max_physics_steps) {
ERR_FAIL_COND_MSG(p_max_physics_steps <= 0, "Maximum number of physics steps per frame must be greater than 0.");
max_physics_steps_per_frame = p_max_physics_steps;
}
int Engine::get_max_physics_steps_per_frame() const {
return max_physics_steps_per_frame;
}
void Engine::set_physics_jitter_fix(double p_threshold) {
if (p_threshold < 0) {
|
random
|
<fim_prefix> if (!block_it.data()->pdblk.poly_block()->IsText()) {
continue;
}
++num_blocks;
}
if (!num_blocks) {
tprintf("WARNING: Found no blocks\n");
return;
}
*block_orientation = new int[num_blocks];
*vertical_writing = new bool[num_blocks];
block_it.move_to_first();
int i = 0;
for (block_it.mark_cycle_pt(); !block_it.cycled_list(); block_it.forward()) {
if (!block_it.data()->pdblk.poly_block()->IsText()) {
continue;
}
FCOORD re_rotation = block_it.data()->re_rotation();
float re_theta = re_rotation.angle();
FCOORD classify_rotation = block_it.data()->classify_rotation();
float classify_theta = classify_rotation.angle();<fim_suffix> }
int num_rotations = static_cast<int>(rot_theta + 0.5);
(*block_orientation)[i] = num_rotations;
// The classify_rotation is non-zero only if the text has vertical
// writing direction.
(*vertical_writing)[i] = classify_rotation.y() != 0.0f;
++i;
}
}
void TessBaseAPI::DetectParagraphs(bool after_text_recognition) {
int debug_level = 0;
GetIntVariable("paragraph_debug_level", &debug_level);
if (paragraph_models_ == nullptr) {
paragraph_models_ = new std::vector<ParagraphModel *>;
}
MutableIterator *result_it = GetMutableIterator();
do { // Detect paragraphs for this block
std::vector<ParagraphModel *> models;<fim_middle> double rot_theta = -(re_theta - classify_theta) * 2.0 / M_PI;
if (rot_theta < 0) {
rot_theta += 4;<fim_end>
|
if (!block_it.data()->pdblk.poly_block()->IsText()) {
continue;
}
++num_blocks;
}
if (!num_blocks) {
tprintf("WARNING: Found no blocks\n");
return;
}
*block_orientation = new int[num_blocks];
*vertical_writing = new bool[num_blocks];
block_it.move_to_first();
int i = 0;
for (block_it.mark_cycle_pt(); !block_it.cycled_list(); block_it.forward()) {
if (!block_it.data()->pdblk.poly_block()->IsText()) {
continue;
}
FCOORD re_rotation = block_it.data()->re_rotation();
float re_theta = re_rotation.angle();
FCOORD classify_rotation = block_it.data()->classify_rotation();
float classify_theta = classify_rotation.angle();
|
double rot_theta = -(re_theta - classify_theta) * 2.0 / M_PI;
if (rot_theta < 0) {
rot_theta += 4;
|
}
int num_rotations = static_cast<int>(rot_theta + 0.5);
(*block_orientation)[i] = num_rotations;
// The classify_rotation is non-zero only if the text has vertical
// writing direction.
(*vertical_writing)[i] = classify_rotation.y() != 0.0f;
++i;
}
}
void TessBaseAPI::DetectParagraphs(bool after_text_recognition) {
int debug_level = 0;
GetIntVariable("paragraph_debug_level", &debug_level);
if (paragraph_models_ == nullptr) {
paragraph_models_ = new std::vector<ParagraphModel *>;
}
MutableIterator *result_it = GetMutableIterator();
do { // Detect paragraphs for this block
std::vector<ParagraphModel *> models;
|
random
|
<fim_prefix>x));
}
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect pasted.
int i = 0;
for (const Pair<int, float> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, animation->track_get_key_time(E->get().first, E->get().second), i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
<fim_suffix>;
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->add_do_method(this, "queue_redraw");
undo_redo->add_undo_method(this, "queue_redraw");
undo_redo->commit_action();
}
}
void AnimationBezierTrackEdit::delete_selection() {
if (selection.size()) {
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Animation Delete Keys"));
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_do_method(animation.ptr(), "track_remove_key", E->get().first, E->get().second);
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", E->get().first, animation->track_get_<fim_middle>undo_redo->add_do_method(ape, "_animation_update_key_frame")<fim_end>
|
x));
}
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect pasted.
int i = 0;
for (const Pair<int, float> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, animation->track_get_key_time(E->get().first, E->get().second), i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
|
undo_redo->add_do_method(ape, "_animation_update_key_frame")
|
;
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->add_do_method(this, "queue_redraw");
undo_redo->add_undo_method(this, "queue_redraw");
undo_redo->commit_action();
}
}
void AnimationBezierTrackEdit::delete_selection() {
if (selection.size()) {
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Animation Delete Keys"));
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
undo_redo->add_do_method(animation.ptr(), "track_remove_key", E->get().first, E->get().second);
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", E->get().first, animation->track_get_
|
ast_based
|
<fim_prefix>lic std::runtime_error {
public:
using std::runtime_error::runtime_error;
};
class UnsupportedModelError: public std::runtime_error {
public:
using std::runtime_error::runtime_error;
};
struct GPUDevice {
const char *backend;
int index;
int type;
size_t heapSize;
std::string name;
std::string vendor;
GPUDevice(const char *backend, int index, int type, size_t heapSize, std::string name, std::string vendor):
backend(backend), index(index), type(type), heapSize(heapSize), name(std::move(name)),
vendor(std::move(vendor)) {}
std::string selectionName() const
<fim_suffix>
std::string backendName() const { return backendIdToName(backend); }
static std::string backendIdToName(const std::string &backend) { return s_backendNames.at(backend); }
static std::string updateSelectionName(const std::string &name) {
if (name == "Auto" || name == "CPU" || name == "Metal")
return name;
auto it = std::find_if(s_backendNames.begin(), s_backendNames.end(), [&name](const auto &entry) {
return name.starts_with(entry.second + ": ");
});
if (it != s_backendNames.end())
return name;
return "Vulkan: " + name; // previously, there were only Vulkan d<fim_middle>{
assert(backend == "cuda"s || backend == "kompute"s);
return backendName() + ": " + name;
}<fim_end>
|
lic std::runtime_error {
public:
using std::runtime_error::runtime_error;
};
class UnsupportedModelError: public std::runtime_error {
public:
using std::runtime_error::runtime_error;
};
struct GPUDevice {
const char *backend;
int index;
int type;
size_t heapSize;
std::string name;
std::string vendor;
GPUDevice(const char *backend, int index, int type, size_t heapSize, std::string name, std::string vendor):
backend(backend), index(index), type(type), heapSize(heapSize), name(std::move(name)),
vendor(std::move(vendor)) {}
std::string selectionName() const
|
{
assert(backend == "cuda"s || backend == "kompute"s);
return backendName() + ": " + name;
}
|
std::string backendName() const { return backendIdToName(backend); }
static std::string backendIdToName(const std::string &backend) { return s_backendNames.at(backend); }
static std::string updateSelectionName(const std::string &name) {
if (name == "Auto" || name == "CPU" || name == "Metal")
return name;
auto it = std::find_if(s_backendNames.begin(), s_backendNames.end(), [&name](const auto &entry) {
return name.starts_with(entry.second + ": ");
});
if (it != s_backendNames.end())
return name;
return "Vulkan: " + name; // previously, there were only Vulkan d
|
ast_based
|
<fim_prefix>/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#ifdef ACCESSKIT_ENABLED
#include "accessibility_driver_accesskit.h"
#include "core/config/project_settings.h"
#include "core/version.h"
#include "servers/text_server.h"
AccessibilityDriverAccessKit *AccessibilityDriverAccessKit::singleton = nullptr;
_FORCE_INLINE_ accesskit_role AccessibilityDriverAccessKit::_accessibility_role(DisplayServer::AccessibilityRole p_role) const {
if (role_map.has(p_role)) {
return role_map[p_role];
}
return ACCESSKIT_ROLE_UNKNOWN;
}<fim_suffix> if (action_map.has(p_action)) {
return action_map[p_action];
}
return ACCESSKIT_ACTION_CLICK;
}
bool AccessibilityDriverAccessKit::window_create(DisplayServer::WindowID p_window_id, void *p_handle) {
ERR_FAIL_COND_V(windows.has(p_window_id), false);
WindowData &wd = windows[p_window_id];
AccessibilityElement *ae = memnew(AccessibilityElement);
ae->role = ACCESSKIT_ROLE_WINDOW;
ae->window_id = p_window_id;
wd.root_id = rid_owner.make_rid(ae);
#ifdef WINDOWS_ENABLED<fim_middle>
_FORCE_INLINE_ accesskit_action AccessibilityDriverAccessKit::_accessibility_action(DisplayServer::AccessibilityAction p_action) const {<fim_end>
|
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#ifdef ACCESSKIT_ENABLED
#include "accessibility_driver_accesskit.h"
#include "core/config/project_settings.h"
#include "core/version.h"
#include "servers/text_server.h"
AccessibilityDriverAccessKit *AccessibilityDriverAccessKit::singleton = nullptr;
_FORCE_INLINE_ accesskit_role AccessibilityDriverAccessKit::_accessibility_role(DisplayServer::AccessibilityRole p_role) const {
if (role_map.has(p_role)) {
return role_map[p_role];
}
return ACCESSKIT_ROLE_UNKNOWN;
}
|
_FORCE_INLINE_ accesskit_action AccessibilityDriverAccessKit::_accessibility_action(DisplayServer::AccessibilityAction p_action) const {
|
if (action_map.has(p_action)) {
return action_map[p_action];
}
return ACCESSKIT_ACTION_CLICK;
}
bool AccessibilityDriverAccessKit::window_create(DisplayServer::WindowID p_window_id, void *p_handle) {
ERR_FAIL_COND_V(windows.has(p_window_id), false);
WindowData &wd = windows[p_window_id];
AccessibilityElement *ae = memnew(AccessibilityElement);
ae->role = ACCESSKIT_ROLE_WINDOW;
ae->window_id = p_window_id;
wd.root_id = rid_owner.make_rid(ae);
#ifdef WINDOWS_ENABLED
|
random
|
<fim_prefix>_rect.position + Vector2(-inner_ofs, selection_rect.size.height + inner_ofs), accent, limit, right_limit);
_draw_line_clipped(selection_rect.position + Vector2(selection_rect.size.width + inner_ofs, -inner_ofs), selection_rect.position + selection_rect.size + Vector2(inner_ofs, inner_ofs), accent, limit, right_limit);
}
selection_handles_rect.position = selection_rect.position - Vector2(outer_ofs, outer_ofs);
selection_handles_rect.size = selection_rect.size + Vector2(outer_ofs, outer_ofs) * 2;
}
if (box_selecting) {
Vector2 bs_from = box_selection_from;
Vector2 bs_to = box_selection_to;
if (bs_from.x > bs_to.x) {
SWAP(bs_from.x, bs_to.x);
}
<fim_suffix>
draw_rect(
Rect2(bs_from, bs_to - bs_from),
get_theme_color(SNAME("box_selection_fill_color"), EditorStringName(Editor)));
draw_rect(
Rect2(bs_from, bs_to - bs_from),
get_theme_color(SNAME("box_selection_stroke_color"), EditorStringName(Editor)),
false,
Math::round(EDSCALE));
}
} break;
}
}
// Check if a track is displayed in the bezier editor (track type = bezier and track not filtered).
bool AnimationBezierTrackEdit::_is_track_displayed(int p_track_index) {
if (animation->track_get_type(p_track_index) != Animation::TrackType::TYPE_BEZIER) {
return false;
}
if (is_filtered) {
String path = String(animation->track_get_path(p_track_i<fim_middle>if (bs_from.y > bs_to.y) {
SWAP(bs_from.y, bs_to.y);
}<fim_end>
|
_rect.position + Vector2(-inner_ofs, selection_rect.size.height + inner_ofs), accent, limit, right_limit);
_draw_line_clipped(selection_rect.position + Vector2(selection_rect.size.width + inner_ofs, -inner_ofs), selection_rect.position + selection_rect.size + Vector2(inner_ofs, inner_ofs), accent, limit, right_limit);
}
selection_handles_rect.position = selection_rect.position - Vector2(outer_ofs, outer_ofs);
selection_handles_rect.size = selection_rect.size + Vector2(outer_ofs, outer_ofs) * 2;
}
if (box_selecting) {
Vector2 bs_from = box_selection_from;
Vector2 bs_to = box_selection_to;
if (bs_from.x > bs_to.x) {
SWAP(bs_from.x, bs_to.x);
}
|
if (bs_from.y > bs_to.y) {
SWAP(bs_from.y, bs_to.y);
}
|
draw_rect(
Rect2(bs_from, bs_to - bs_from),
get_theme_color(SNAME("box_selection_fill_color"), EditorStringName(Editor)));
draw_rect(
Rect2(bs_from, bs_to - bs_from),
get_theme_color(SNAME("box_selection_stroke_color"), EditorStringName(Editor)),
false,
Math::round(EDSCALE));
}
} break;
}
}
// Check if a track is displayed in the bezier editor (track type = bezier and track not filtered).
bool AnimationBezierTrackEdit::_is_track_displayed(int p_track_index) {
if (animation->track_get_type(p_track_index) != Animation::TrackType::TYPE_BEZIER) {
return false;
}
if (is_filtered) {
String path = String(animation->track_get_path(p_track_i
|
ast_based
|
<fim_prefix>(0, SEEK_SET);
}
}
size_t tell() const {
#ifdef _WIN32
__int64 ret = _ftelli64(fp);
#else
long ret = std::ftell(fp);
#endif
GGML_ASSERT(ret != -1); // this really shouldn't fail
return (size_t) ret;
}
void seek(size_t offset, int whence) {
#ifdef _WIN32
int ret = _fseeki64(fp, (__int64) offset, whence);
#else
int ret = std::fseek(fp, (long) offset, whence);
#endif
GGML_ASSERT(ret == 0); // same
}
void read_raw(void * ptr, size_t size) {
if (size == 0) {
return;
}
errno = 0;
std::size_t ret = std::fread(ptr, size, 1, fp);
if (ferror(fp)) {
<fim_suffix>;
}
if (ret != 1) {
die("unexpectedly reached end of file");
}
}
std::uint32_t read_u32() {
std::uint32_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::float_t read_f32() {
std::float_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::string read_string(std::uint32_t len) {
std::vector<char> chars(len);
read_raw(chars.data(), len);
return std::string(chars.data(), len);
}
~my_llama_file() {
if (fp) {
std::fclose(fp);
}
}
};
static bool is_ggml_file(const char * filename) {
my_llama_file file(filename, "r<fim_middle>die_fmt("fread failed: %s", strerror(errno))<fim_end>
|
(0, SEEK_SET);
}
}
size_t tell() const {
#ifdef _WIN32
__int64 ret = _ftelli64(fp);
#else
long ret = std::ftell(fp);
#endif
GGML_ASSERT(ret != -1); // this really shouldn't fail
return (size_t) ret;
}
void seek(size_t offset, int whence) {
#ifdef _WIN32
int ret = _fseeki64(fp, (__int64) offset, whence);
#else
int ret = std::fseek(fp, (long) offset, whence);
#endif
GGML_ASSERT(ret == 0); // same
}
void read_raw(void * ptr, size_t size) {
if (size == 0) {
return;
}
errno = 0;
std::size_t ret = std::fread(ptr, size, 1, fp);
if (ferror(fp)) {
|
die_fmt("fread failed: %s", strerror(errno))
|
;
}
if (ret != 1) {
die("unexpectedly reached end of file");
}
}
std::uint32_t read_u32() {
std::uint32_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::float_t read_f32() {
std::float_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::string read_string(std::uint32_t len) {
std::vector<char> chars(len);
read_raw(chars.data(), len);
return std::string(chars.data(), len);
}
~my_llama_file() {
if (fp) {
std::fclose(fp);
}
}
};
static bool is_ggml_file(const char * filename) {
my_llama_file file(filename, "r
|
ast_based
|
<fim_prefix>oid DeserializeFileDB(const fs::path& path, Data&& data)
{
FILE* file = fsbridge::fopen(path, "rb");
AutoFile filein{file};
if (filein.IsNull()) {
throw DbNotFoundError{};
}
DeserializeDB(filein, data);
}
} // namespace
CBanDB::CBanDB(fs::path ban_list_path)
: m_banlist_dat(ban_list_path + ".dat"),
m_banlist_json(ban_list_path + ".json")
{
}
bool CBanDB::Write(const banmap_t& banSet)
{
std::vector<std::string> errors;
if (common::WriteSettings(m_banlist_json, {{JSON_KEY, BanMapToJson(banSet)}}, errors)) {
return true;
}
for (const auto& err : errors) {
LogError("%s\n", err);
}
return false;
}
bool CBanDB::Read(<fim_suffix>)
{
if (fs::exists(m_banlist_dat)) {
LogWarning("banlist.dat ignored because it can only be read by " CLIENT_NAME " version 22.x. Remove %s to silence this warning.", fs::quoted(fs::PathToString(m_banlist_dat)));
}
// If the JSON banlist does not exist, then recreate it
if (!fs::exists(m_banlist_json)) {
return false;
}
std::map<std::string, common::SettingsValue> settings;
std::vector<std::string> errors;
if (!common::ReadSettings(m_banlist_json, settings, errors)) {
for (const auto& err : errors) {
LogWarning("Cannot load banlist %s: %s", fs::PathToString(m_banlist_json), err);
}
return false;
}
<fim_middle>banmap_t& banSet<fim_end>
|
oid DeserializeFileDB(const fs::path& path, Data&& data)
{
FILE* file = fsbridge::fopen(path, "rb");
AutoFile filein{file};
if (filein.IsNull()) {
throw DbNotFoundError{};
}
DeserializeDB(filein, data);
}
} // namespace
CBanDB::CBanDB(fs::path ban_list_path)
: m_banlist_dat(ban_list_path + ".dat"),
m_banlist_json(ban_list_path + ".json")
{
}
bool CBanDB::Write(const banmap_t& banSet)
{
std::vector<std::string> errors;
if (common::WriteSettings(m_banlist_json, {{JSON_KEY, BanMapToJson(banSet)}}, errors)) {
return true;
}
for (const auto& err : errors) {
LogError("%s\n", err);
}
return false;
}
bool CBanDB::Read(
|
banmap_t& banSet
|
)
{
if (fs::exists(m_banlist_dat)) {
LogWarning("banlist.dat ignored because it can only be read by " CLIENT_NAME " version 22.x. Remove %s to silence this warning.", fs::quoted(fs::PathToString(m_banlist_dat)));
}
// If the JSON banlist does not exist, then recreate it
if (!fs::exists(m_banlist_json)) {
return false;
}
std::map<std::string, common::SettingsValue> settings;
std::vector<std::string> errors;
if (!common::ReadSettings(m_banlist_json, settings, errors)) {
for (const auto& err : errors) {
LogWarning("Cannot load banlist %s: %s", fs::PathToString(m_banlist_json), err);
}
return false;
}
|
ast_based
|
<fim_prefix> mCalibData->cameraMatrix.at<double>(0,0) =
mCalibData->cameraMatrix.at<double>(1,1);
}
}
if(!(mCalibFlags & cv::CALIB_ZERO_TANGENT_DIST)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(2)) < eps &&
fabs(mCalibData->distCoeffs.at<double>(3)) < eps)
mCalibFlags |= cv::CALIB_ZERO_TANGENT_DIST;
}
if(!(mCalibFlags & cv::CALIB_FIX_K1)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(0)) < eps)
mCalibFlags |= cv::CALIB_FIX_K1;<fim_suffix> if(fabs(mCalibData->distCoeffs.at<double>(1)) < eps)
mCalibFlags |= cv::CALIB_FIX_K2;
}
if(!(mCalibFlags & cv::CALIB_FIX_K3)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(4)) < eps)
mCalibFlags |= cv::CALIB_FIX_K3;
}
}
}
bool calib::calibController::getCommonCalibrationState() const
{
int rating = (int)getFramesNumberState() + (int)getConfidenceIntrervalsState() +
(int)getRMSState() + (int)mCoverageQualityState;
return rating == 4;
}
bool calib::calibController::getFramesNumberState() const
{<fim_middle> }
if(!(mCalibFlags & cv::CALIB_FIX_K2)) {
const double eps = 0.005;<fim_end>
|
mCalibData->cameraMatrix.at<double>(0,0) =
mCalibData->cameraMatrix.at<double>(1,1);
}
}
if(!(mCalibFlags & cv::CALIB_ZERO_TANGENT_DIST)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(2)) < eps &&
fabs(mCalibData->distCoeffs.at<double>(3)) < eps)
mCalibFlags |= cv::CALIB_ZERO_TANGENT_DIST;
}
if(!(mCalibFlags & cv::CALIB_FIX_K1)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(0)) < eps)
mCalibFlags |= cv::CALIB_FIX_K1;
|
}
if(!(mCalibFlags & cv::CALIB_FIX_K2)) {
const double eps = 0.005;
|
if(fabs(mCalibData->distCoeffs.at<double>(1)) < eps)
mCalibFlags |= cv::CALIB_FIX_K2;
}
if(!(mCalibFlags & cv::CALIB_FIX_K3)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(4)) < eps)
mCalibFlags |= cv::CALIB_FIX_K3;
}
}
}
bool calib::calibController::getCommonCalibrationState() const
{
int rating = (int)getFramesNumberState() + (int)getConfidenceIntrervalsState() +
(int)getRMSState() + (int)mCoverageQualityState;
return rating == 4;
}
bool calib::calibController::getFramesNumberState() const
{
|
random
|
<fim_prefix>buft_override {
const char * pattern;
ggml_backend_buffer_type_t buft;
};
struct llama_model_params {
// NULL-terminated list of devices to use for offloading (if NULL, all available devices are used)
ggml_backend_dev_t * devices;
// NULL-terminated list of buffer types to use for tensors that match a pattern
const struct llama_model_tensor_buft_override * tensor_buft_overrides;
int32_t n_gpu_layers; // number of layers to store in VRAM
enum llama_split_mode split_mode; // how to split the model across multiple GPUs
// the GPU that is used for the entire model when split_mode is LLAMA_SPLIT_MODE_NONE
<fim_suffix>
// proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
const float * tensor_split;
// Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
// If the provided progress_callback returns true, model loading continues.
// If it returns false, model loading is immediately aborted.
llama_progress_callback progress_callback;
// context pointer passed to the progress callback
void * progress_callback_user_data;
// override key-value pairs of the model meta data
const struct llama_model_kv_override * kv_overrides;
// Keep the booleans together to avoi<fim_middle>int32_t main_gpu;<fim_end>
|
buft_override {
const char * pattern;
ggml_backend_buffer_type_t buft;
};
struct llama_model_params {
// NULL-terminated list of devices to use for offloading (if NULL, all available devices are used)
ggml_backend_dev_t * devices;
// NULL-terminated list of buffer types to use for tensors that match a pattern
const struct llama_model_tensor_buft_override * tensor_buft_overrides;
int32_t n_gpu_layers; // number of layers to store in VRAM
enum llama_split_mode split_mode; // how to split the model across multiple GPUs
// the GPU that is used for the entire model when split_mode is LLAMA_SPLIT_MODE_NONE
|
int32_t main_gpu;
|
// proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
const float * tensor_split;
// Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
// If the provided progress_callback returns true, model loading continues.
// If it returns false, model loading is immediately aborted.
llama_progress_callback progress_callback;
// context pointer passed to the progress callback
void * progress_callback_user_data;
// override key-value pairs of the model meta data
const struct llama_model_kv_override * kv_overrides;
// Keep the booleans together to avoi
|
ast_based
|
<fim_prefix> nullptr, 0, nullptr, nullptr, false, &mgr) == 0) {
osd_tess = osd_tesseract_;
osd_tesseract_->set_source_resolution(thresholder_->GetSourceYResolution());
} else {
tprintf(
"Warning: Auto orientation and script detection requested,"
" but osd language failed to load\n");
delete osd_tesseract_;
osd_tesseract_ = nullptr;
}
}
}
#endif // ndef DISABLED_LEGACY_ENGINE
if (tesseract_->SegmentPage(input_file_.c_str(), block_list_, osd_tess, &osr) < 0) {
return -1;
}
// If Devanagari is being recognized, we use different images for page seg
// and for OCR.
<fim_suffix>;
return 0;
}
/**
* Return average gradient of lines on page.
*/
float TessBaseAPI::GetGradient() {
return tesseract_->gradient();
}
/** Delete the pageres and clear the block list ready for a new page. */
void TessBaseAPI::ClearResults() {
if (tesseract_ != nullptr) {
tesseract_->Clear();
}
delete page_res_;
page_res_ = nullptr;
recognition_done_ = false;
if (block_list_ == nullptr) {
block_list_ = new BLOCK_LIST;
} else {
block_list_->clear();
}
if (paragraph_models_ != nullptr) {
for (auto model : *paragraph_models_) {
delete model;
}
delete paragraph_models_;
paragraph_models_ = nullptr;
}
}
/**
* Return the length of the ou<fim_middle>tesseract_->PrepareForTessOCR(block_list_, osd_tess, &osr)<fim_end>
|
nullptr, 0, nullptr, nullptr, false, &mgr) == 0) {
osd_tess = osd_tesseract_;
osd_tesseract_->set_source_resolution(thresholder_->GetSourceYResolution());
} else {
tprintf(
"Warning: Auto orientation and script detection requested,"
" but osd language failed to load\n");
delete osd_tesseract_;
osd_tesseract_ = nullptr;
}
}
}
#endif // ndef DISABLED_LEGACY_ENGINE
if (tesseract_->SegmentPage(input_file_.c_str(), block_list_, osd_tess, &osr) < 0) {
return -1;
}
// If Devanagari is being recognized, we use different images for page seg
// and for OCR.
|
tesseract_->PrepareForTessOCR(block_list_, osd_tess, &osr)
|
;
return 0;
}
/**
* Return average gradient of lines on page.
*/
float TessBaseAPI::GetGradient() {
return tesseract_->gradient();
}
/** Delete the pageres and clear the block list ready for a new page. */
void TessBaseAPI::ClearResults() {
if (tesseract_ != nullptr) {
tesseract_->Clear();
}
delete page_res_;
page_res_ = nullptr;
recognition_done_ = false;
if (block_list_ == nullptr) {
block_list_ = new BLOCK_LIST;
} else {
block_list_->clear();
}
if (paragraph_models_ != nullptr) {
for (auto model : *paragraph_models_) {
delete model;
}
delete paragraph_models_;
paragraph_models_ = nullptr;
}
}
/**
* Return the length of the ou
|
ast_based
|
<fim_prefix> type = LLAMA_TOKEN_TYPE_CONTROL;
} else if (text.empty()) {
type = LLAMA_TOKEN_TYPE_CONTROL;
} else if (sscanf(text.c_str(), "<0x%02hhX>", &byte_val) == 1) {
// Text of byte tokens is already in the expected format.
type = LLAMA_TOKEN_TYPE_BYTE;
} else {
type = LLAMA_TOKEN_TYPE_NORMAL;
}
text = llama_escape_whitespaces(text);
vocab->id_to_token[id].text = text;
vocab->id_to_token[id].score = score;
vocab->id_to_token[id].type = type;
vocab->token_to_id.emplace(text, id);
}
}<fim_suffix> size *= gg_weights->ne[dim];
}
for (int ct = 0; ct < size; ++ct) {
int64_t i0 = 0; int64_t i1 = 0;
int64_t i2 = 0; int64_t i3 = 0;
ggml_unravel_index(gg_weights, ct, &i0, &i1, &i2, &i3);
ggml_set_f32_nd(gg_weights, i0, i1, i2, i3, karpathy_weights[ct]);
}
}
static void save_as_llama_model(
struct my_llama_vocab * vocab, struct my_llama_model * model, TransformerWeights* w, const char * filename
) {
// convert AK weights into GG weights one by one.
// w->token_embedding_table -> model->tok_embeddings
// float* -> struct ggml_tensor<fim_middle>}
static void convert_weights_ak_to_gg(struct ggml_tensor * gg_weights, const float * karpathy_weights) {
int size = 1;
for (int dim = 0; dim < ggml_n_dims(gg_weights); ++dim) {<fim_end>
|
type = LLAMA_TOKEN_TYPE_CONTROL;
} else if (text.empty()) {
type = LLAMA_TOKEN_TYPE_CONTROL;
} else if (sscanf(text.c_str(), "<0x%02hhX>", &byte_val) == 1) {
// Text of byte tokens is already in the expected format.
type = LLAMA_TOKEN_TYPE_BYTE;
} else {
type = LLAMA_TOKEN_TYPE_NORMAL;
}
text = llama_escape_whitespaces(text);
vocab->id_to_token[id].text = text;
vocab->id_to_token[id].score = score;
vocab->id_to_token[id].type = type;
vocab->token_to_id.emplace(text, id);
}
}
|
}
static void convert_weights_ak_to_gg(struct ggml_tensor * gg_weights, const float * karpathy_weights) {
int size = 1;
for (int dim = 0; dim < ggml_n_dims(gg_weights); ++dim) {
|
size *= gg_weights->ne[dim];
}
for (int ct = 0; ct < size; ++ct) {
int64_t i0 = 0; int64_t i1 = 0;
int64_t i2 = 0; int64_t i3 = 0;
ggml_unravel_index(gg_weights, ct, &i0, &i1, &i2, &i3);
ggml_set_f32_nd(gg_weights, i0, i1, i2, i3, karpathy_weights[ct]);
}
}
static void save_as_llama_model(
struct my_llama_vocab * vocab, struct my_llama_model * model, TransformerWeights* w, const char * filename
) {
// convert AK weights into GG weights one by one.
// w->token_embedding_table -> model->tok_embeddings
// float* -> struct ggml_tensor
|
random
|
<fim_prefix> if (vflip) {
dst_rect.size.y = -dst_rect.size.y;
}
texture->draw_rect_region(ci, dst_rect, Rect2(Vector2(), texture->get_size()), Color(1, 1, 1), false);
} break;
}
}
void AnimatedSprite2D::set_sprite_frames(const Ref<SpriteFrames> &p_frames) {
if (frames == p_frames) {
return;
}
if (frames.is_valid()) {
frames->disconnect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
}
stop();
frames = p_frames;
if (frames.is_valid()) {
frames->connect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
List<StringName> al;
frames->get_animation_list(&al);
if (al.is_empty()) {
set_animation(StringName());
<fim_suffix>;
} else {
if (!frames->has_animation(animation)) {
set_animation(al.front()->get());
}
if (!frames->has_animation(autoplay)) {
autoplay = String();
}
}
}
notify_property_list_changed();
queue_redraw();
update_configuration_warnings();
emit_signal("sprite_frames_changed");
}
Ref<SpriteFrames> AnimatedSprite2D::get_sprite_frames() const {
return frames;
}
void AnimatedSprite2D::set_frame(int p_frame) {
set_frame_and_progress(p_frame, std::signbit(get_playing_speed()) ? 1.0 : 0.0);
}
int AnimatedSprite2D::get_frame() const {
return frame;
}
void AnimatedSprite2D::set_frame_progress(real_t p_progress) {
frame_progress = p_progress;
}
real_t AnimatedSprite2<fim_middle>autoplay = String()<fim_end>
|
if (vflip) {
dst_rect.size.y = -dst_rect.size.y;
}
texture->draw_rect_region(ci, dst_rect, Rect2(Vector2(), texture->get_size()), Color(1, 1, 1), false);
} break;
}
}
void AnimatedSprite2D::set_sprite_frames(const Ref<SpriteFrames> &p_frames) {
if (frames == p_frames) {
return;
}
if (frames.is_valid()) {
frames->disconnect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
}
stop();
frames = p_frames;
if (frames.is_valid()) {
frames->connect(CoreStringName(changed), callable_mp(this, &AnimatedSprite2D::_res_changed));
List<StringName> al;
frames->get_animation_list(&al);
if (al.is_empty()) {
set_animation(StringName());
|
autoplay = String()
|
;
} else {
if (!frames->has_animation(animation)) {
set_animation(al.front()->get());
}
if (!frames->has_animation(autoplay)) {
autoplay = String();
}
}
}
notify_property_list_changed();
queue_redraw();
update_configuration_warnings();
emit_signal("sprite_frames_changed");
}
Ref<SpriteFrames> AnimatedSprite2D::get_sprite_frames() const {
return frames;
}
void AnimatedSprite2D::set_frame(int p_frame) {
set_frame_and_progress(p_frame, std::signbit(get_playing_speed()) ? 1.0 : 0.0);
}
int AnimatedSprite2D::get_frame() const {
return frame;
}
void AnimatedSprite2D::set_frame_progress(real_t p_progress) {
frame_progress = p_progress;
}
real_t AnimatedSprite2
|
ast_based
|
<fim_prefix> float penalty_present); // 0.0 = disabled
/// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982
LLAMA_API struct llama_sampler * llama_sampler_init_dry(
const struct llama_vocab * vocab,
int32_t n_ctx_train,
float dry_multiplier,
float dry_base,
int32_t dry_allowed_length,
int32_t dry_penalty_last_n,<fim_suffix> LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias(
int32_t n_vocab,
int32_t n_logit_bias,
const llama_logit_bias * logit_bias);
// this sampler is meant to be used for fill-in-the-middle infilling
// it's supposed to be used after top_k + top_p sampling
//
// 1. if the sum of the EOG probs times the number of candidates is higher than the sum of the other probs -> pick EOG
// 2. combine probs of tokens that have the same prefix
//
// example:
//
// - before:
// "hel": 0.5
// "hell": 0.2
// "hello": 0.1
// "dummy": 0.1
//<fim_middle> const char ** seq_breakers,
size_t num_breakers);
<fim_end>
|
float penalty_present); // 0.0 = disabled
/// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982
LLAMA_API struct llama_sampler * llama_sampler_init_dry(
const struct llama_vocab * vocab,
int32_t n_ctx_train,
float dry_multiplier,
float dry_base,
int32_t dry_allowed_length,
int32_t dry_penalty_last_n,
|
const char ** seq_breakers,
size_t num_breakers);
|
LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias(
int32_t n_vocab,
int32_t n_logit_bias,
const llama_logit_bias * logit_bias);
// this sampler is meant to be used for fill-in-the-middle infilling
// it's supposed to be used after top_k + top_p sampling
//
// 1. if the sum of the EOG probs times the number of candidates is higher than the sum of the other probs -> pick EOG
// 2. combine probs of tokens that have the same prefix
//
// example:
//
// - before:
// "hel": 0.5
// "hell": 0.2
// "hello": 0.1
// "dummy": 0.1
//
|
random
|
<fim_prefix> for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata);
}
absl::Status ClientSession::Run(
const RunOptions& run_options, const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs,
RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) const {
std::vector<std::pair<string, Tensor>> feeds;
for (auto const& feed : inputs) {
<fim_suffix>;
feeds.emplace_back(feed.first.name(), feed.second.tensor);
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata,
threadpool_option<fim_middle>TF_RETURN_IF_ERROR(feed.second.status)<fim_end>
|
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata);
}
absl::Status ClientSession::Run(
const RunOptions& run_options, const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs,
RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) const {
std::vector<std::pair<string, Tensor>> feeds;
for (auto const& feed : inputs) {
|
TF_RETURN_IF_ERROR(feed.second.status)
|
;
feeds.emplace_back(feed.first.name(), feed.second.tensor);
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata,
threadpool_option
|
ast_based
|
<fim_prefix>o_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(left);
tsv_str += "\t" + std::to_string(top);
tsv_str += "\t" + std::to_string(right - left);
tsv_str += "\t" + std::to_string(bottom - top);
tsv_str += "\t" + std::to_string(res_it->Confidence(RIL_WORD));
tsv_str += "\t";
#if !defined(NDEBUG)
// Increment counts if at end of block/paragraph/textline.
if (res_it->IsAtFinalElement(RIL_TEXTLINE, RIL_WORD)) {
lcnt++;
}
if (res_it->IsAtFinalElement(RIL_PARA, RIL_WORD)) {
pcnt++;
}
if (res_it->IsAtFinalElement(RIL_BLOCK, RIL_WORD)) {
bcnt++;
}
#endif
do {
tsv_str += <fim_suffix>.get();
res_it->Next(RIL_SYMBOL);
} while (!res_it->Empty(RIL_BLOCK) && !res_it->IsAtBeginningOf(RIL_WORD));
tsv_str += "\n"; // end of row
#if !defined(NDEBUG)
wcnt++;
#endif
}
return copy_string(tsv_str);
}
/** The 5 numbers output for each box (the usual 4 and a page number.) */
const int kNumbersPerBlob = 5;
/**
* The number of bytes taken by each number. Since we use int16_t for ICOORD,
* assume only 5 digits max.
*/
const int kBytesPerNumber = 5;
/**
* Multiplier for max expected textlength assumes (kBytesPerNumber + space)
* * kNumbersPerBlob plus the newline. Add to this the
* original UTF8 characters, and one kMaxBytesPerLine for safety.
*/
const int k<fim_middle>std::unique_ptr<const char[]>(res_it->GetUTF8Text(RIL_SYMBOL))<fim_end>
|
o_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(left);
tsv_str += "\t" + std::to_string(top);
tsv_str += "\t" + std::to_string(right - left);
tsv_str += "\t" + std::to_string(bottom - top);
tsv_str += "\t" + std::to_string(res_it->Confidence(RIL_WORD));
tsv_str += "\t";
#if !defined(NDEBUG)
// Increment counts if at end of block/paragraph/textline.
if (res_it->IsAtFinalElement(RIL_TEXTLINE, RIL_WORD)) {
lcnt++;
}
if (res_it->IsAtFinalElement(RIL_PARA, RIL_WORD)) {
pcnt++;
}
if (res_it->IsAtFinalElement(RIL_BLOCK, RIL_WORD)) {
bcnt++;
}
#endif
do {
tsv_str +=
|
std::unique_ptr<const char[]>(res_it->GetUTF8Text(RIL_SYMBOL))
|
.get();
res_it->Next(RIL_SYMBOL);
} while (!res_it->Empty(RIL_BLOCK) && !res_it->IsAtBeginningOf(RIL_WORD));
tsv_str += "\n"; // end of row
#if !defined(NDEBUG)
wcnt++;
#endif
}
return copy_string(tsv_str);
}
/** The 5 numbers output for each box (the usual 4 and a page number.) */
const int kNumbersPerBlob = 5;
/**
* The number of bytes taken by each number. Since we use int16_t for ICOORD,
* assume only 5 digits max.
*/
const int kBytesPerNumber = 5;
/**
* Multiplier for max expected textlength assumes (kBytesPerNumber + space)
* * kNumbersPerBlob plus the newline. Add to this the
* original UTF8 characters, and one kMaxBytesPerLine for safety.
*/
const int k
|
ast_based
|
<fim_prefix>sion_reloading = false;
bool embedded_in_editor = false;
bool recovery_mode_hint = false;
bool _print_header = true;
static inline Engine *singleton = nullptr;
String write_movie_path;
String shader_cache_path;
static constexpr int SERVER_SYNC_FRAME_COUNT_WARNING = 5;
int server_syncs = 0;
bool frame_server_synced = false;
bool freeze_time_scale = false;
public:
static Engine *get_singleton();
virtual void set_physics_ticks_per_second(int p_ips);
virtual int get_physics_ticks_per_second() const;
virtual void set_max_physics_steps_per_frame(int p_max_physics_steps);
virtual int get_max_physics_steps_per_frame() const;
void set_physics_jitter_fix(double p_threshold);
<fim_suffix>
virtual void set_max_fps(int p_fps);
virtual int get_max_fps() const;
virtual void set_audio_output_latency(int p_msec);
virtual int get_audio_output_latency() const;
virtual double get_frames_per_second() const { return _fps; }
uint64_t get_frames_drawn();
uint64_t get_physics_frames() const { return _physics_frames; }
uint64_t get_process_frames() const { return _process_frames; }
bool is_in_physics_frame() const { return _in_physics; }
uint64_t get_frame_ticks() const { return _frame_ticks; }
double get_process_step() const { return _process_step; }
double get_physics_interpolation_fraction() const { return _physics_interpolation_fraction; }
void set_time_scale(double <fim_middle>double get_physics_jitter_fix() const;<fim_end>
|
sion_reloading = false;
bool embedded_in_editor = false;
bool recovery_mode_hint = false;
bool _print_header = true;
static inline Engine *singleton = nullptr;
String write_movie_path;
String shader_cache_path;
static constexpr int SERVER_SYNC_FRAME_COUNT_WARNING = 5;
int server_syncs = 0;
bool frame_server_synced = false;
bool freeze_time_scale = false;
public:
static Engine *get_singleton();
virtual void set_physics_ticks_per_second(int p_ips);
virtual int get_physics_ticks_per_second() const;
virtual void set_max_physics_steps_per_frame(int p_max_physics_steps);
virtual int get_max_physics_steps_per_frame() const;
void set_physics_jitter_fix(double p_threshold);
|
double get_physics_jitter_fix() const;
|
virtual void set_max_fps(int p_fps);
virtual int get_max_fps() const;
virtual void set_audio_output_latency(int p_msec);
virtual int get_audio_output_latency() const;
virtual double get_frames_per_second() const { return _fps; }
uint64_t get_frames_drawn();
uint64_t get_physics_frames() const { return _physics_frames; }
uint64_t get_process_frames() const { return _process_frames; }
bool is_in_physics_frame() const { return _in_physics; }
uint64_t get_frame_ticks() const { return _frame_ticks; }
double get_process_step() const { return _process_step; }
double get_physics_interpolation_fraction() const { return _physics_interpolation_fraction; }
void set_time_scale(double
|
ast_based
|
<fim_prefix> LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors<fim_suffix> LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors<fim_middle> LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors<fim_end>
|
LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors
|
LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors
|
LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors
|
random
|
<fim_prefix>track_key_pair.second, p_mode, p_auto ? Animation::HANDLE_SET_MODE_AUTO : Animation::HANDLE_SET_MODE_RESET);
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
}
void AnimationBezierTrackEdit::_clear_selection_for_anim(const Ref<Animation> &p_anim) {
if (!(animation == p_anim) || !is_visible()) {
return;
}
_clear_selection();
}
void AnimationBezierTrackEdit::_select_at_anim(const Ref<Animation> &p_anim, int p_track, real_t p_pos, bool p_single) {
if (!(animation == p_anim) || !is_visible()) <fim_suffix>
int idx = animation->track_find_key(p_track, p_pos, Animation::FIND_MODE_APPROX);
ERR_FAIL_COND(idx < 0);
selection.insert(IntPair(p_track, idx));
emit_signal(SNAME("select_key"), idx, p_single, p_track);
queue_redraw();
}
void AnimationBezierTrackEdit::gui_input(const Ref<InputEvent> &p_event) {
ERR_FAIL_COND(p_event.is_null());
if (panner->gui_input(p_event)) {
accept_event();
return;
}
if (p_event->is_pressed()) {
if (ED_IS_SHORTCUT("animation_editor/duplicate_selected_keys", p_event)) {
if (!read_only) {
duplicate_selected_keys(-1.0, false);
}
accept_event();
}
if (ED_IS_SHORTCUT("animation_editor/cut_selected_keys", p_event)) {
if (!read_only) {
<fim_middle>{
return;
}<fim_end>
|
track_key_pair.second, p_mode, p_auto ? Animation::HANDLE_SET_MODE_AUTO : Animation::HANDLE_SET_MODE_RESET);
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
}
void AnimationBezierTrackEdit::_clear_selection_for_anim(const Ref<Animation> &p_anim) {
if (!(animation == p_anim) || !is_visible()) {
return;
}
_clear_selection();
}
void AnimationBezierTrackEdit::_select_at_anim(const Ref<Animation> &p_anim, int p_track, real_t p_pos, bool p_single) {
if (!(animation == p_anim) || !is_visible())
|
{
return;
}
|
int idx = animation->track_find_key(p_track, p_pos, Animation::FIND_MODE_APPROX);
ERR_FAIL_COND(idx < 0);
selection.insert(IntPair(p_track, idx));
emit_signal(SNAME("select_key"), idx, p_single, p_track);
queue_redraw();
}
void AnimationBezierTrackEdit::gui_input(const Ref<InputEvent> &p_event) {
ERR_FAIL_COND(p_event.is_null());
if (panner->gui_input(p_event)) {
accept_event();
return;
}
if (p_event->is_pressed()) {
if (ED_IS_SHORTCUT("animation_editor/duplicate_selected_keys", p_event)) {
if (!read_only) {
duplicate_selected_keys(-1.0, false);
}
accept_event();
}
if (ED_IS_SHORTCUT("animation_editor/cut_selected_keys", p_event)) {
if (!read_only) {
|
ast_based
|
<fim_prefix> bool use_validation_layers = false;
bool generate_spirv_debug_info = false;
bool extra_gpu_memory_tracking = false;
#if defined(DEBUG_ENABLED) || defined(DEV_ENABLED)
bool accurate_breadcrumbs = false;
#endif
int32_t gpu_idx = -1;
uint64_t _process_frames = 0;
bool _in_physics = false;
List<Singleton> singletons;
HashMap<StringName, Object *> singleton_ptrs;
bool editor_hint = false;
bool project_manager_hint = false;
bool extension_reloading = false;
bool embedded_in_editor = false;
bool recovery_mode_hint = false;
bool _print_header = true;
static inline Engine *singleton = nullptr;
String write_movie_path;
String shader_cache_path;<fim_suffix> bool frame_server_synced = false;
bool freeze_time_scale = false;
public:
static Engine *get_singleton();
virtual void set_physics_ticks_per_second(int p_ips);
virtual int get_physics_ticks_per_second() const;
virtual void set_max_physics_steps_per_frame(int p_max_physics_steps);
virtual int get_max_physics_steps_per_frame() const;
void set_physics_jitter_fix(double p_threshold);
double get_physics_jitter_fix() const;
virtual void set_max_fps(int p_fps);
virtual int get_max_fps() const;
virtual void set_audio_output_latency(int p_msec);
virtual int get_audio_output_latency() const;
virtual double get_frames_per_second() const { return _fps; }
<fim_middle>
static constexpr int SERVER_SYNC_FRAME_COUNT_WARNING = 5;
int server_syncs = 0;<fim_end>
|
bool use_validation_layers = false;
bool generate_spirv_debug_info = false;
bool extra_gpu_memory_tracking = false;
#if defined(DEBUG_ENABLED) || defined(DEV_ENABLED)
bool accurate_breadcrumbs = false;
#endif
int32_t gpu_idx = -1;
uint64_t _process_frames = 0;
bool _in_physics = false;
List<Singleton> singletons;
HashMap<StringName, Object *> singleton_ptrs;
bool editor_hint = false;
bool project_manager_hint = false;
bool extension_reloading = false;
bool embedded_in_editor = false;
bool recovery_mode_hint = false;
bool _print_header = true;
static inline Engine *singleton = nullptr;
String write_movie_path;
String shader_cache_path;
|
static constexpr int SERVER_SYNC_FRAME_COUNT_WARNING = 5;
int server_syncs = 0;
|
bool frame_server_synced = false;
bool freeze_time_scale = false;
public:
static Engine *get_singleton();
virtual void set_physics_ticks_per_second(int p_ips);
virtual int get_physics_ticks_per_second() const;
virtual void set_max_physics_steps_per_frame(int p_max_physics_steps);
virtual int get_max_physics_steps_per_frame() const;
void set_physics_jitter_fix(double p_threshold);
double get_physics_jitter_fix() const;
virtual void set_max_fps(int p_fps);
virtual int get_max_fps() const;
virtual void set_audio_output_latency(int p_msec);
virtual int get_audio_output_latency() const;
virtual double get_frames_per_second() const { return _fps; }
|
random
|
<fim_prefix>HDR;
} else {
target_format = Image::FORMAT_ASTC_4x4;
}
} else if (p_format == Image::ASTCFormat::ASTC_FORMAT_8x8) {
if (is_hdr) {
target_format = Image::FORMAT_ASTC_8x8_HDR;
} else {
target_format = Image::FORMAT_ASTC_8x8;
}
block_x = 8;
block_y = 8;
}
// Compress image data and (if required) mipmaps.
const bool has_mipmaps = r_img->has_mipmaps();
int width = r_img->get_width();
int height = r_img->get_height();
int required_width = (width % block_x) != 0 ? width + (block_x - (width % block_x)) : width;
int required_height = (height % block_y) != 0 ? height + (block_y - (height % block_y)) : height;
if (width != required_width || height != required_height) <fim_suffix>
height = required_height;
}
print_verbose(vformat("astcenc: Encoding image size %dx%d to format %s%s.", width, height, Image::get_format_name(target_format), has_mipmaps ? ", with mipmaps" : ""));
// Initialize astcenc.
const int64_t dest_size = Image::get_image_data_size(width, height, target_format, has_mipmaps);
Vector<uint8_t> dest_data;
dest_data.resize(dest_size);
uint8_t *dest_write = dest_data.ptrw();
astcenc_config config;
config.block_x = block_x;
config.block_y = block_y;
config.profile = profile;
const float quality = ASTCENC_PRE_MEDIUM;
astcenc_error status = astcenc_config_init(profile, block_x, block_y, 1, quality, 0, &config);
ERR_FAIL_COND_MSG(status != <fim_middle>{
// Resize texture to fit block size.
r_img->resize(required_width, required_height);
width = required_width;<fim_end>
|
HDR;
} else {
target_format = Image::FORMAT_ASTC_4x4;
}
} else if (p_format == Image::ASTCFormat::ASTC_FORMAT_8x8) {
if (is_hdr) {
target_format = Image::FORMAT_ASTC_8x8_HDR;
} else {
target_format = Image::FORMAT_ASTC_8x8;
}
block_x = 8;
block_y = 8;
}
// Compress image data and (if required) mipmaps.
const bool has_mipmaps = r_img->has_mipmaps();
int width = r_img->get_width();
int height = r_img->get_height();
int required_width = (width % block_x) != 0 ? width + (block_x - (width % block_x)) : width;
int required_height = (height % block_y) != 0 ? height + (block_y - (height % block_y)) : height;
if (width != required_width || height != required_height)
|
{
// Resize texture to fit block size.
r_img->resize(required_width, required_height);
width = required_width;
|
height = required_height;
}
print_verbose(vformat("astcenc: Encoding image size %dx%d to format %s%s.", width, height, Image::get_format_name(target_format), has_mipmaps ? ", with mipmaps" : ""));
// Initialize astcenc.
const int64_t dest_size = Image::get_image_data_size(width, height, target_format, has_mipmaps);
Vector<uint8_t> dest_data;
dest_data.resize(dest_size);
uint8_t *dest_write = dest_data.ptrw();
astcenc_config config;
config.block_x = block_x;
config.block_y = block_y;
config.profile = profile;
const float quality = ASTCENC_PRE_MEDIUM;
astcenc_error status = astcenc_config_init(profile, block_x, block_y, 1, quality, 0, &config);
ERR_FAIL_COND_MSG(status !=
|
ast_based
|
<fim_prefix> return shader_cache_path;
}
Engine *Engine::get_singleton() {
return singleton;
}
bool Engine::notify_frame_server_synced() {
frame_server_synced = true;
return server_syncs > SERVER_SYNC_FRAME_COUNT_WARNING;
}
void Engine::set_freeze_time_scale(bool p_frozen) {
freeze_time_scale = p_frozen;
}
void Engine::set_embedded_in_editor(bool p_enabled) {
embedded_in_editor = p_enabled;
}
bool Engine::is_embedded_in_editor() const {
return embedded_in_editor;
}
Engine::Engine() {
singleton = this;
}
Engine::~Engine() {
if (singleton == this) {
singleton = nullptr;
}
}
Engine::Singleton::Singleton(const StringName &p_name, Object *p_ptr, const StringName &p_class_name) :<fim_suffix>#ifdef DEBUG_ENABLED
RefCounted *rc = Object::cast_to<RefCounted>(p_ptr);
if (rc && !rc->is_referenced()) {
WARN_PRINT("You must use Ref<> to ensure the lifetime of a RefCounted object intended to be used as a singleton.");
}
#endif
}
<fim_middle> name(p_name),
ptr(p_ptr),
class_name(p_class_name) {<fim_end>
|
return shader_cache_path;
}
Engine *Engine::get_singleton() {
return singleton;
}
bool Engine::notify_frame_server_synced() {
frame_server_synced = true;
return server_syncs > SERVER_SYNC_FRAME_COUNT_WARNING;
}
void Engine::set_freeze_time_scale(bool p_frozen) {
freeze_time_scale = p_frozen;
}
void Engine::set_embedded_in_editor(bool p_enabled) {
embedded_in_editor = p_enabled;
}
bool Engine::is_embedded_in_editor() const {
return embedded_in_editor;
}
Engine::Engine() {
singleton = this;
}
Engine::~Engine() {
if (singleton == this) {
singleton = nullptr;
}
}
Engine::Singleton::Singleton(const StringName &p_name, Object *p_ptr, const StringName &p_class_name) :
|
name(p_name),
ptr(p_ptr),
class_name(p_class_name) {
|
#ifdef DEBUG_ENABLED
RefCounted *rc = Object::cast_to<RefCounted>(p_ptr);
if (rc && !rc->is_referenced()) {
WARN_PRINT("You must use Ref<> to ensure the lifetime of a RefCounted object intended to be used as a singleton.");
}
#endif
}
|
random
|
<fim_prefix>_modes.end(); ++restore_itr, ++handle_itr) {
const AnimMoveRestore &amr = *restore_itr;
Array key = amr.key;
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", amr.track, amr.time, amr.key, 1);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
amr.track,
amr.time,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
*handle_itr);
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// 7 - Reselect.
int i = 0;
for (SelectionSet::Element *E = selection.back(); E; <fim_suffix>) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second);
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
undo_redo->add_do_method(this, "_select_at_anim", animation, E->get().first, newpos, i == 0);
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, oldpos, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animatio<fim_middle>E = E->prev()<fim_end>
|
_modes.end(); ++restore_itr, ++handle_itr) {
const AnimMoveRestore &amr = *restore_itr;
Array key = amr.key;
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", amr.track, amr.time, amr.key, 1);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
amr.track,
amr.time,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
*handle_itr);
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// 7 - Reselect.
int i = 0;
for (SelectionSet::Element *E = selection.back(); E;
|
E = E->prev()
|
) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
real_t newpos = animation->track_get_key_time(E->get().first, E->get().second);
newpos += -scaling_selection_offset.x + (newpos - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
undo_redo->add_do_method(this, "_select_at_anim", animation, E->get().first, newpos, i == 0);
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, oldpos, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animatio
|
ast_based
|
<fim_prefix>CCESSIBILITY_UPDATE notification.");
if (p_id.is_valid() && rid_owner.owns(p_id)) {
focus = p_id;
} else {
focus = RID();
}
}
RID AccessibilityDriverAccessKit::accessibility_get_window_root(DisplayServer::WindowID p_window_id) const {
const WindowData *wd = windows.getptr(p_window_id);
ERR_FAIL_NULL_V(wd, RID());
return wd->root_id;
}
accesskit_tree_update *AccessibilityDriverAccessKit::_accessibility_build_tree_update(void *p_user_data) {
DisplayServer::WindowID window_id = (DisplayServer::WindowID)(size_t)p_user_data;
ERR_FAIL_COND_V(!singleton->windows.has(window_id), nullptr);
WindowData &wd = singleton->windows[window_id];
singleton->in_accessibility_update = true;
<fim_suffix>
singleton->in_accessibility_update = false;
AccessibilityElement *focus_ae = singleton->rid_owner.get_or_null(singleton->focus);
uint32_t update_size = wd.update.size();
accesskit_node_id ac_focus = (accesskit_node_id)wd.root_id.get_id();
if (focus_ae && focus_ae->window_id == window_id) {
ac_focus = (accesskit_node_id)singleton->focus.get_id();
}
accesskit_tree_update *tree_update = (update_size > 0) ? accesskit_tree_update_with_capacity_and_focus(update_size, ac_focus) : accesskit_tree_update_with_focus(ac_focus);
for (const RID &rid : wd.update) {
AccessibilityElement *ae = singleton->rid_owner.get_or_null(rid);
if (ae && ae->node) {
for (const RID &child_rid : ae->ch<fim_middle>if (singleton->update_cb.is_valid()) {
singleton->update_cb.call(window_id);
}<fim_end>
|
CCESSIBILITY_UPDATE notification.");
if (p_id.is_valid() && rid_owner.owns(p_id)) {
focus = p_id;
} else {
focus = RID();
}
}
RID AccessibilityDriverAccessKit::accessibility_get_window_root(DisplayServer::WindowID p_window_id) const {
const WindowData *wd = windows.getptr(p_window_id);
ERR_FAIL_NULL_V(wd, RID());
return wd->root_id;
}
accesskit_tree_update *AccessibilityDriverAccessKit::_accessibility_build_tree_update(void *p_user_data) {
DisplayServer::WindowID window_id = (DisplayServer::WindowID)(size_t)p_user_data;
ERR_FAIL_COND_V(!singleton->windows.has(window_id), nullptr);
WindowData &wd = singleton->windows[window_id];
singleton->in_accessibility_update = true;
|
if (singleton->update_cb.is_valid()) {
singleton->update_cb.call(window_id);
}
|
singleton->in_accessibility_update = false;
AccessibilityElement *focus_ae = singleton->rid_owner.get_or_null(singleton->focus);
uint32_t update_size = wd.update.size();
accesskit_node_id ac_focus = (accesskit_node_id)wd.root_id.get_id();
if (focus_ae && focus_ae->window_id == window_id) {
ac_focus = (accesskit_node_id)singleton->focus.get_id();
}
accesskit_tree_update *tree_update = (update_size > 0) ? accesskit_tree_update_with_capacity_and_focus(update_size, ac_focus) : accesskit_tree_update_with_focus(ac_focus);
for (const RID &rid : wd.update) {
AccessibilityElement *ae = singleton->rid_owner.get_or_null(rid);
if (ae && ae->node) {
for (const RID &child_rid : ae->ch
|
ast_based
|
<fim_prefix>y) {
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Move Bezier Points"));
if (moving_handle == -1) {
real_t ratio = timeline->get_zoom_scale() * timeline_v_zoom;
undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_in_handle", moving_handle_track, moving_handle_key, moving_handle_left, ratio);
undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_in_handle", moving_handle_track, moving_handle_key, animation->bezier_track_get_key_in_handle(moving_handle_track, moving_handle_key), ratio);
} else if (moving_handle == 1) {
real_t ratio = timeline->get_zoom_scale() * timeline_v_zoom;
<fim_suffix>
undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_out_handle", moving_handle_track, moving_handle_key, animation->bezier_track_get_key_out_handle(moving_handle_track, moving_handle_key), ratio);
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
moving_handle = 0;
queue_redraw();
}
}
}
bool AnimationBezierTrackEdit::_try_select_at_ui_pos(const Point2 &p_pos, bool p_aggregate, bool p_deselectable) {
for (int i = 0; i < edit_points.size(); i++) {
// Path<fim_middle>undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_out_handle", moving_handle_track, moving_handle_key, moving_handle_right, ratio);<fim_end>
|
y) {
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Move Bezier Points"));
if (moving_handle == -1) {
real_t ratio = timeline->get_zoom_scale() * timeline_v_zoom;
undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_in_handle", moving_handle_track, moving_handle_key, moving_handle_left, ratio);
undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_in_handle", moving_handle_track, moving_handle_key, animation->bezier_track_get_key_in_handle(moving_handle_track, moving_handle_key), ratio);
} else if (moving_handle == 1) {
real_t ratio = timeline->get_zoom_scale() * timeline_v_zoom;
|
undo_redo->add_do_method(animation.ptr(), "bezier_track_set_key_out_handle", moving_handle_track, moving_handle_key, moving_handle_right, ratio);
|
undo_redo->add_undo_method(animation.ptr(), "bezier_track_set_key_out_handle", moving_handle_track, moving_handle_key, animation->bezier_track_get_key_out_handle(moving_handle_track, moving_handle_key), ratio);
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->commit_action();
moving_handle = 0;
queue_redraw();
}
}
}
bool AnimationBezierTrackEdit::_try_select_at_ui_pos(const Point2 &p_pos, bool p_aggregate, bool p_deselectable) {
for (int i = 0; i < edit_points.size(); i++) {
// Path
|
ast_based
|
<fim_prefix>_size(width, height, target_format, has_mipmaps);
Vector<uint8_t> dest_data;
dest_data.resize(dest_size);
uint8_t *dest_write = dest_data.ptrw();
// Decompress image.
const int mip_count = has_mipmaps ? Image::get_image_required_mipmaps(width, height, target_format) : 0;
const uint8_t *src_data = r_img->ptr();
for (int i = 0; i < mip_count + 1; i++) {
const int64_t src_ofs = Image::get_image_mipmap_offset(width, height, src_format, i);
const uint8_t *mip_data = &src_data[src_ofs];
int64_t src_size;
if (i == mip_count) {
src_size = r_img->get_data_size() - src_ofs;
} else {
src_size = Image::get_image_mipmap_offset(width, height, src_format, i + 1) - src_ofs;
}
<fim_suffix>
const int64_t dst_ofs = Image::get_image_mipmap_offset_and_dimensions(width, height, target_format, i, dst_mip_w, dst_mip_h);
// Ensure that mip offset is a multiple of 8 (etcpak expects uint64_t pointer).
ERR_FAIL_COND(dst_ofs % 8 != 0);
uint8_t *dest_mip_write = &dest_write[dst_ofs];
astcenc_image image;
image.dim_x = dst_mip_w;
image.dim_y = dst_mip_h;
image.dim_z = 1;
image.data_type = is_hdr ? ASTCENC_TYPE_F16 : ASTCENC_TYPE_U8;
image.data = (void **)(&dest_mip_write);
const astcenc_swizzle swizzle = {
ASTCENC_SWZ_R, ASTCENC_SWZ_G, ASTCENC_SWZ_B, ASTCENC_SWZ_A
};
status = astcenc_decompress_image(context, mip_data, src_size, &image, &swizzle, 0);
ERR<fim_middle>int dst_mip_w, dst_mip_h;<fim_end>
|
_size(width, height, target_format, has_mipmaps);
Vector<uint8_t> dest_data;
dest_data.resize(dest_size);
uint8_t *dest_write = dest_data.ptrw();
// Decompress image.
const int mip_count = has_mipmaps ? Image::get_image_required_mipmaps(width, height, target_format) : 0;
const uint8_t *src_data = r_img->ptr();
for (int i = 0; i < mip_count + 1; i++) {
const int64_t src_ofs = Image::get_image_mipmap_offset(width, height, src_format, i);
const uint8_t *mip_data = &src_data[src_ofs];
int64_t src_size;
if (i == mip_count) {
src_size = r_img->get_data_size() - src_ofs;
} else {
src_size = Image::get_image_mipmap_offset(width, height, src_format, i + 1) - src_ofs;
}
|
int dst_mip_w, dst_mip_h;
|
const int64_t dst_ofs = Image::get_image_mipmap_offset_and_dimensions(width, height, target_format, i, dst_mip_w, dst_mip_h);
// Ensure that mip offset is a multiple of 8 (etcpak expects uint64_t pointer).
ERR_FAIL_COND(dst_ofs % 8 != 0);
uint8_t *dest_mip_write = &dest_write[dst_ofs];
astcenc_image image;
image.dim_x = dst_mip_w;
image.dim_y = dst_mip_h;
image.dim_z = 1;
image.data_type = is_hdr ? ASTCENC_TYPE_F16 : ASTCENC_TYPE_U8;
image.data = (void **)(&dest_mip_write);
const astcenc_swizzle swizzle = {
ASTCENC_SWZ_R, ASTCENC_SWZ_G, ASTCENC_SWZ_B, ASTCENC_SWZ_A
};
status = astcenc_decompress_image(context, mip_data, src_size, &image, &swizzle, 0);
ERR
|
ast_based
|
<fim_prefix> if (!buf) {
LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
return false;
}
ggml_backend_buffer_clear(buf, 0);
bufs.emplace_back(buf);
}
return true;
}
bool llama_adapter_cvec::apply(
const llama_model & model,
const float * data,
size_t len,
int32_t n_embd,
int32_t il_start,
int32_t il_end) {
const auto & hparams = model.hparams;
if (data == nullptr) {
// disable the current control vector (but leave allocated for later)
layer_start = -1;
layer_end = -1;<fim_suffix> }
if (n_embd != (int) hparams.n_embd) {
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
return false;
}
if (tensors.empty()) {
if (!init(model)) {
return false;
}
}
layer_start = il_start;
layer_end = il_end;
for (size_t il = 1; il < hparams.n_layer; il++) {
assert(tensors[il] != nullptr);
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
if (off + n_embd <= len) {
ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
}
}
return true;<fim_middle> return true;<fim_end>
|
if (!buf) {
LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
return false;
}
ggml_backend_buffer_clear(buf, 0);
bufs.emplace_back(buf);
}
return true;
}
bool llama_adapter_cvec::apply(
const llama_model & model,
const float * data,
size_t len,
int32_t n_embd,
int32_t il_start,
int32_t il_end) {
const auto & hparams = model.hparams;
if (data == nullptr) {
// disable the current control vector (but leave allocated for later)
layer_start = -1;
layer_end = -1;
|
return true;
|
}
if (n_embd != (int) hparams.n_embd) {
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
return false;
}
if (tensors.empty()) {
if (!init(model)) {
return false;
}
}
layer_start = il_start;
layer_end = il_end;
for (size_t il = 1; il < hparams.n_layer; il++) {
assert(tensors[il] != nullptr);
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
if (off + n_embd <= len) {
ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
}
}
return true;
|
random
|
<fim_prefix> * WARNING! This class points to data held within the TessBaseAPI class, and
* therefore can only be used while the TessBaseAPI class still exists and
 * has not been subjected to a call of Init, SetImage, Recognize, Clear, End,
* DetectOS, or anything else that changes the internal PAGE_RES.
*/
MutableIterator *TessBaseAPI::GetMutableIterator() {
if (tesseract_ == nullptr || page_res_ == nullptr) {
return nullptr;
}
return new MutableIterator(page_res_, tesseract_, thresholder_->GetScaleFactor(),
thresholder_->GetScaledYResolution(), rect_left_, rect_top_,
rect_width_, rect_height_);
}
<fim_suffix> }
std::string text("");
const std::unique_ptr</*non-const*/ ResultIterator> it(GetIterator());
do {
if (it->Empty(RIL_PARA)) {
continue;
}
auto block_type = it->BlockType();
switch (block_type) {
case PT_FLOWING_IMAGE:
case PT_HEADING_IMAGE:
case PT_PULLOUT_IMAGE:
case PT_HORZ_LINE:
case PT_VERT_LINE:
// Ignore images and lines for text output.
continue;
case PT_NOISE:
tprintf("TODO: Please report image which triggers the noise case.\n");
ASSERT_HOST(false);
default:
break;
}
const std::unique_ptr<const char[]> para_text(it->GetUTF8Text(RIL_PARA));<fim_middle>/** Make a text string from the internal data structures. */
char *TessBaseAPI::GetUTF8Text() {
if (tesseract_ == nullptr || (!recognition_done_ && Recognize(nullptr) < 0)) {
return nullptr;<fim_end>
|
* WARNING! This class points to data held within the TessBaseAPI class, and
* therefore can only be used while the TessBaseAPI class still exists and
 * has not been subjected to a call of Init, SetImage, Recognize, Clear, End,
* DetectOS, or anything else that changes the internal PAGE_RES.
*/
MutableIterator *TessBaseAPI::GetMutableIterator() {
if (tesseract_ == nullptr || page_res_ == nullptr) {
return nullptr;
}
return new MutableIterator(page_res_, tesseract_, thresholder_->GetScaleFactor(),
thresholder_->GetScaledYResolution(), rect_left_, rect_top_,
rect_width_, rect_height_);
}
|
/** Make a text string from the internal data structures. */
char *TessBaseAPI::GetUTF8Text() {
if (tesseract_ == nullptr || (!recognition_done_ && Recognize(nullptr) < 0)) {
return nullptr;
|
}
std::string text("");
const std::unique_ptr</*non-const*/ ResultIterator> it(GetIterator());
do {
if (it->Empty(RIL_PARA)) {
continue;
}
auto block_type = it->BlockType();
switch (block_type) {
case PT_FLOWING_IMAGE:
case PT_HEADING_IMAGE:
case PT_PULLOUT_IMAGE:
case PT_HORZ_LINE:
case PT_VERT_LINE:
// Ignore images and lines for text output.
continue;
case PT_NOISE:
tprintf("TODO: Please report image which triggers the noise case.\n");
ASSERT_HOST(false);
default:
break;
}
const std::unique_ptr<const char[]> para_text(it->GetUTF8Text(RIL_PARA));
|
random
|
<fim_prefix>
r_options->push_back(String(name).quote());
}
}
}
Node2D::get_argument_options(p_function, p_idx, r_options);
}
#endif // TOOLS_ENABLED
#ifndef DISABLE_DEPRECATED
bool AnimatedSprite2D::_set(const StringName &p_name, const Variant &p_value) {
if ((p_name == SNAME("frames"))) {
set_sprite_frames(p_value);
return true;
}
return false;
}
#endif
void AnimatedSprite2D::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_sprite_frames", "sprite_frames"), &AnimatedSprite2D::set_sprite_frames);
ClassDB::bind_method(D_METHOD("get_sprite_frames"), &AnimatedSprite2D::get_sprite_frames);
ClassDB::bind_method(D_METHOD("set_animation", "name"), &AnimatedSprite2D::set_animation);
<fim_suffix>;
ClassDB::bind_method(D_METHOD("set_autoplay", "name"), &AnimatedSprite2D::set_autoplay);
ClassDB::bind_method(D_METHOD("get_autoplay"), &AnimatedSprite2D::get_autoplay);
ClassDB::bind_method(D_METHOD("is_playing"), &AnimatedSprite2D::is_playing);
ClassDB::bind_method(D_METHOD("play", "name", "custom_speed", "from_end"), &AnimatedSprite2D::play, DEFVAL(StringName()), DEFVAL(1.0), DEFVAL(false));
ClassDB::bind_method(D_METHOD("play_backwards", "name"), &AnimatedSprite2D::play_backwards, DEFVAL(StringName()));
ClassDB::bind_method(D_METHOD("pause"), &AnimatedSprite2D::pause);
ClassDB::bind_method(D_METHOD("stop"), &AnimatedSprite2D::stop);
ClassDB::bind_method(D_METHOD("set_center<fim_middle>ClassDB::bind_method(D_METHOD("get_animation"), &AnimatedSprite2D::get_animation)<fim_end>
|
r_options->push_back(String(name).quote());
}
}
}
Node2D::get_argument_options(p_function, p_idx, r_options);
}
#endif // TOOLS_ENABLED
#ifndef DISABLE_DEPRECATED
bool AnimatedSprite2D::_set(const StringName &p_name, const Variant &p_value) {
if ((p_name == SNAME("frames"))) {
set_sprite_frames(p_value);
return true;
}
return false;
}
#endif
void AnimatedSprite2D::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_sprite_frames", "sprite_frames"), &AnimatedSprite2D::set_sprite_frames);
ClassDB::bind_method(D_METHOD("get_sprite_frames"), &AnimatedSprite2D::get_sprite_frames);
ClassDB::bind_method(D_METHOD("set_animation", "name"), &AnimatedSprite2D::set_animation);
|
ClassDB::bind_method(D_METHOD("get_animation"), &AnimatedSprite2D::get_animation)
|
;
ClassDB::bind_method(D_METHOD("set_autoplay", "name"), &AnimatedSprite2D::set_autoplay);
ClassDB::bind_method(D_METHOD("get_autoplay"), &AnimatedSprite2D::get_autoplay);
ClassDB::bind_method(D_METHOD("is_playing"), &AnimatedSprite2D::is_playing);
ClassDB::bind_method(D_METHOD("play", "name", "custom_speed", "from_end"), &AnimatedSprite2D::play, DEFVAL(StringName()), DEFVAL(1.0), DEFVAL(false));
ClassDB::bind_method(D_METHOD("play_backwards", "name"), &AnimatedSprite2D::play_backwards, DEFVAL(StringName()));
ClassDB::bind_method(D_METHOD("pause"), &AnimatedSprite2D::pause);
ClassDB::bind_method(D_METHOD("stop"), &AnimatedSprite2D::stop);
ClassDB::bind_method(D_METHOD("set_center
|
ast_based
|
<fim_prefix> ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_foreground_color(ae->node, p_color.to_rgba32());
}
Error AccessibilityDriverAccessKit::init() {
#ifdef ACCESSKIT_DYNAMIC
#ifdef DEBUG_ENABLED
int dylibloader_verbose = 1;
#else
int dylibloader_verbose = 0;
#endif
void *library_handle = nullptr;
String path;
String arch = Engine::get_singleton()->get_architecture_name();
#ifdef LINUXBSD_ENABLED
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit." + arch + ".so");
if (!FileAccess::exists(path)) {
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../lib").path_join("libaccesskit." + arch + ".so");
}<fim_suffix> path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit.so");
}
if (!FileAccess::exists(path)) {
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../lib").path_join("libaccesskit.so");
}
if (!FileAccess::exists(path)) {
return ERR_CANT_CREATE;
}
#endif
#ifdef MACOS_ENABLED
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit." + arch + ".dylib");
if (!FileAccess::exists(path)) {
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../Frameworks").path_join("libaccesskit." + arch + ".dylib");
}
if (!FileAccess::exists(path)) {<fim_middle> if (!FileAccess::exists(path)) {<fim_end>
|
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_foreground_color(ae->node, p_color.to_rgba32());
}
Error AccessibilityDriverAccessKit::init() {
#ifdef ACCESSKIT_DYNAMIC
#ifdef DEBUG_ENABLED
int dylibloader_verbose = 1;
#else
int dylibloader_verbose = 0;
#endif
void *library_handle = nullptr;
String path;
String arch = Engine::get_singleton()->get_architecture_name();
#ifdef LINUXBSD_ENABLED
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit." + arch + ".so");
if (!FileAccess::exists(path)) {
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../lib").path_join("libaccesskit." + arch + ".so");
}
|
if (!FileAccess::exists(path)) {
|
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit.so");
}
if (!FileAccess::exists(path)) {
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../lib").path_join("libaccesskit.so");
}
if (!FileAccess::exists(path)) {
return ERR_CANT_CREATE;
}
#endif
#ifdef MACOS_ENABLED
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("libaccesskit." + arch + ".dylib");
if (!FileAccess::exists(path)) {
path = OS::get_singleton()->get_executable_path().get_base_dir().path_join("../Frameworks").path_join("libaccesskit." + arch + ".dylib");
}
if (!FileAccess::exists(path)) {
|
random
|
<fim_prefix>onst;
Dictionary get_donor_info() const;
Dictionary get_license_info() const;
String get_license_text() const;
void set_write_movie_path(const String &p_path);
String get_write_movie_path() const;
String get_architecture_name() const;
void set_shader_cache_path(const String &p_path);
String get_shader_cache_path() const;
bool is_abort_on_gpu_errors_enabled() const;
bool is_validation_layers_enabled() const;
bool is_generate_spirv_debug_info_enabled() const;
bool is_extra_gpu_memory_tracking_enabled() const;
#if defined(DEBUG_ENABLED) || defined(DEV_ENABLED)
bool is_accurate_breadcrumbs_enabled() const;
#endif
int32_t get_gpu_index() const;
void increment_frames_drawn();
<fim_suffix>
void set_freeze_time_scale(bool p_frozen);
void set_embedded_in_editor(bool p_enabled);
bool is_embedded_in_editor() const;
Engine();
virtual ~Engine();
};
<fim_middle>bool notify_frame_server_synced();<fim_end>
|
onst;
Dictionary get_donor_info() const;
Dictionary get_license_info() const;
String get_license_text() const;
void set_write_movie_path(const String &p_path);
String get_write_movie_path() const;
String get_architecture_name() const;
void set_shader_cache_path(const String &p_path);
String get_shader_cache_path() const;
bool is_abort_on_gpu_errors_enabled() const;
bool is_validation_layers_enabled() const;
bool is_generate_spirv_debug_info_enabled() const;
bool is_extra_gpu_memory_tracking_enabled() const;
#if defined(DEBUG_ENABLED) || defined(DEV_ENABLED)
bool is_accurate_breadcrumbs_enabled() const;
#endif
int32_t get_gpu_index() const;
void increment_frames_drawn();
|
bool notify_frame_server_synced();
|
void set_freeze_time_scale(bool p_frozen);
void set_embedded_in_editor(bool p_enabled);
bool is_embedded_in_editor() const;
Engine();
virtual ~Engine();
};
|
ast_based
|
<fim_prefix>/
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/credentials/call/call_credentials.h"
#include <grpc/support/port_platform.h>
#include <grpcpp/security/credentials.h>
#include "absl/strings/str_cat.h"
#include "src/core/util/grpc_check.h"
namespace grpc {
CallCredentials::CallCredentials(grpc_call_credentials* c_creds)
: c_creds_(c_creds) {
<fim_suffix>
}
CallCredentials::~CallCredentials() { grpc_call_credentials_release(c_creds_); }
grpc::string CallCredentials::DebugString() {
return absl::StrCat("CallCredentials{", c_creds_->debug_string(), "}");
}
bool CallCredentials::ApplyToCall(grpc_call* call) {
return grpc_call_set_credentials(call, c_creds_) == GRPC_CALL_OK;
}
} // namespace grpc
<fim_middle>GRPC_CHECK_NE(c_creds, nullptr);<fim_end>
|
/
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/credentials/call/call_credentials.h"
#include <grpc/support/port_platform.h>
#include <grpcpp/security/credentials.h>
#include "absl/strings/str_cat.h"
#include "src/core/util/grpc_check.h"
namespace grpc {
CallCredentials::CallCredentials(grpc_call_credentials* c_creds)
: c_creds_(c_creds) {
|
GRPC_CHECK_NE(c_creds, nullptr);
|
}
CallCredentials::~CallCredentials() { grpc_call_credentials_release(c_creds_); }
grpc::string CallCredentials::DebugString() {
return absl::StrCat("CallCredentials{", c_creds_->debug_string(), "}");
}
bool CallCredentials::ApplyToCall(grpc_call* call) {
return grpc_call_set_credentials(call, c_creds_) == GRPC_CALL_OK;
}
} // namespace grpc
|
ast_based
|
<fim_prefix>#include "dlhandle.h"
#include <string>
#ifndef _WIN32
# include <dlfcn.h>
#else
# include <cassert>
# include <sstream>
# define WIN32_LEAN_AND_MEAN
# ifndef NOMINMAX
# define NOMINMAX
# endif
# include <windows.h>
#endif
using namespace std::string_literals;
namespace fs = std::filesystem;
#ifndef _WIN32
Dlhandle::Dlhandle(const fs::path &fpath)
{
chandle = dlopen(fpath.c_str(), RTLD_LAZY | RTLD_LOCAL);
if (!chandle) {
throw Exception("dlopen: "s + dlerror());
}
}
Dlhandle::~Dlhandle()
{
if (chandle) <fim_suffix>
}
void *Dlhandle::get_internal(const char *symbol) const
{
return dlsym(chandle, symbol);
}
#else // defined(_WIN32)
Dlhandle::Dlhandle(const fs::path &fpath)
{
fs::path afpath = fs::absolute(fpath);
// Suppress the "Entry Point Not Found" dialog, caused by outdated nvcuda.dll from the GPU driver
UINT lastErrorMode = GetErrorMode();
SetErrorMode(lastErrorMode | SEM_FAILCRITICALERRORS);
chandle = LoadLibraryExW(afpath.c_str(), NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS | LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR);
SetErrorMode(lastErrorMode);
if (!chandle) {
DWORD err = GetLastError();
std::ostringstream ss;
ss << "LoadLibraryExW failed with <fim_middle>dlclose(chandle);<fim_end>
|
#include "dlhandle.h"
#include <string>
#ifndef _WIN32
# include <dlfcn.h>
#else
# include <cassert>
# include <sstream>
# define WIN32_LEAN_AND_MEAN
# ifndef NOMINMAX
# define NOMINMAX
# endif
# include <windows.h>
#endif
using namespace std::string_literals;
namespace fs = std::filesystem;
#ifndef _WIN32
Dlhandle::Dlhandle(const fs::path &fpath)
{
chandle = dlopen(fpath.c_str(), RTLD_LAZY | RTLD_LOCAL);
if (!chandle) {
throw Exception("dlopen: "s + dlerror());
}
}
Dlhandle::~Dlhandle()
{
if (chandle)
|
dlclose(chandle);
|
}
void *Dlhandle::get_internal(const char *symbol) const
{
return dlsym(chandle, symbol);
}
#else // defined(_WIN32)
Dlhandle::Dlhandle(const fs::path &fpath)
{
fs::path afpath = fs::absolute(fpath);
// Suppress the "Entry Point Not Found" dialog, caused by outdated nvcuda.dll from the GPU driver
UINT lastErrorMode = GetErrorMode();
SetErrorMode(lastErrorMode | SEM_FAILCRITICALERRORS);
chandle = LoadLibraryExW(afpath.c_str(), NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS | LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR);
SetErrorMode(lastErrorMode);
if (!chandle) {
DWORD err = GetLastError();
std::ostringstream ss;
ss << "LoadLibraryExW failed with
|
ast_based
|
<fim_prefix> undo_redo->add_do_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, E->get().second), animation->track_get_key_transition(E->get().first, E->get().second));
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, dst_time);
Pair<int, real_t> p;
p.first = E->get().first;
p.second = dst_time;
new_selection_values.push_back(p);
if (existing_idx != -1) {
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, existing_idx), animation->track_get_key_transition(E->get().first, existing_idx));
}
}
<fim_suffix>;
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect duplicated.
int i = 0;
for (const Pair<int, real_t> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t time = animation->track_get_key_time(E->get().first, E->get().second);
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, time, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo-><fim_middle>undo_redo->add_do_method(this, "_clear_selection_for_anim", animation)<fim_end>
|
undo_redo->add_do_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, E->get().second), animation->track_get_key_transition(E->get().first, E->get().second));
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, dst_time);
Pair<int, real_t> p;
p.first = E->get().first;
p.second = dst_time;
new_selection_values.push_back(p);
if (existing_idx != -1) {
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, existing_idx), animation->track_get_key_transition(E->get().first, existing_idx));
}
}
|
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation)
|
;
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect duplicated.
int i = 0;
for (const Pair<int, real_t> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t time = animation->track_get_key_time(E->get().first, E->get().second);
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, time, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->
|
ast_based
|
<fim_prefix> absl::Status Run(const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
std::vector<Tensor>* outputs) const;
  /// Same as above. Additionally runs the operations in `run_outputs`.
absl::Status Run(const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs) const;
/// Use `run_options` to turn on performance profiling. `run_metadata`, if not
/// null, is filled in with the profiling results.
absl::Status Run(const RunOptions& run_options, const FeedType& inputs,<fim_suffix> const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) const;
  /// Same as above. Additionally allows the user to provide a custom threadpool
/// implementation via ThreadPoolOptions.
absl::Status Run(const RunOptions& run_options, const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) const;
/// \brief A handle to a subgraph, created with<fim_middle> const std::vector<Output>& fetch_outputs,<fim_end>
|
absl::Status Run(const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
std::vector<Tensor>* outputs) const;
  /// Same as above. Additionally runs the operations in `run_outputs`.
absl::Status Run(const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs) const;
/// Use `run_options` to turn on performance profiling. `run_metadata`, if not
/// null, is filled in with the profiling results.
absl::Status Run(const RunOptions& run_options, const FeedType& inputs,
|
const std::vector<Output>& fetch_outputs,
|
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) const;
  /// Same as above. Additionally allows the user to provide a custom threadpool
/// implementation via ThreadPoolOptions.
absl::Status Run(const RunOptions& run_options, const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) const;
/// \brief A handle to a subgraph, created with
|
random
|
<fim_prefix> " but data path is undefined\n");
delete osd_tesseract_;
osd_tesseract_ = nullptr;
} else if (osd_tesseract_->init_tesseract(datapath_, "", "osd", OEM_TESSERACT_ONLY,
nullptr, 0, nullptr, nullptr, false, &mgr) == 0) {
osd_tess = osd_tesseract_;
osd_tesseract_->set_source_resolution(thresholder_->GetSourceYResolution());
} else {
tprintf(
"Warning: Auto orientation and script detection requested,"
" but osd language failed to load\n");
delete osd_tesseract_;
osd_tesseract_ = nullptr;
}
}
}
#endif // ndef DISABLED_LEGACY_ENGINE
<fim_suffix>
// If Devanagari is being recognized, we use different images for page seg
// and for OCR.
tesseract_->PrepareForTessOCR(block_list_, osd_tess, &osr);
return 0;
}
/**
* Return average gradient of lines on page.
*/
float TessBaseAPI::GetGradient() {
return tesseract_->gradient();
}
/** Delete the pageres and clear the block list ready for a new page. */
void TessBaseAPI::ClearResults() {
if (tesseract_ != nullptr) {
tesseract_->Clear();
}
delete page_res_;
page_res_ = nullptr;
recognition_done_ = false;
if (block_list_ == nullptr) {
block_list_ = new BLOCK_LIST;
} else {
block_list_->clear();
}
if (paragraph_models_ != nullptr) {<fim_middle> if (tesseract_->SegmentPage(input_file_.c_str(), block_list_, osd_tess, &osr) < 0) {
return -1;
}<fim_end>
|
" but data path is undefined\n");
delete osd_tesseract_;
osd_tesseract_ = nullptr;
} else if (osd_tesseract_->init_tesseract(datapath_, "", "osd", OEM_TESSERACT_ONLY,
nullptr, 0, nullptr, nullptr, false, &mgr) == 0) {
osd_tess = osd_tesseract_;
osd_tesseract_->set_source_resolution(thresholder_->GetSourceYResolution());
} else {
tprintf(
"Warning: Auto orientation and script detection requested,"
" but osd language failed to load\n");
delete osd_tesseract_;
osd_tesseract_ = nullptr;
}
}
}
#endif // ndef DISABLED_LEGACY_ENGINE
|
if (tesseract_->SegmentPage(input_file_.c_str(), block_list_, osd_tess, &osr) < 0) {
return -1;
}
|
// If Devanagari is being recognized, we use different images for page seg
// and for OCR.
tesseract_->PrepareForTessOCR(block_list_, osd_tess, &osr);
return 0;
}
/**
* Return average gradient of lines on page.
*/
float TessBaseAPI::GetGradient() {
return tesseract_->gradient();
}
/** Delete the pageres and clear the block list ready for a new page. */
void TessBaseAPI::ClearResults() {
if (tesseract_ != nullptr) {
tesseract_->Clear();
}
delete page_res_;
page_res_ = nullptr;
recognition_done_ = false;
if (block_list_ == nullptr) {
block_list_ = new BLOCK_LIST;
} else {
block_list_->clear();
}
if (paragraph_models_ != nullptr) {
|
random
|
<fim_prefix>
llama_model_params model_params = common_model_params_to_llama(params);
llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params);
if (model == NULL) {
LOG_ERR("%s: error: unable to load model\n" , __func__);
return 1;
}
const llama_vocab * vocab = llama_model_get_vocab(model);
// tokenize the prompt
std::vector<llama_token> tokens_list;
tokens_list = common_tokenize(vocab, params.prompt, true);
const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel;
// initialize the context
llama_context_params ctx_params = common_context_params_to_llama(params);<fim_suffix>
llama_context * ctx = llama_init_from_model(model, ctx_params);
auto sparams = llama_sampler_chain_default_params();
sparams.no_perf = false;
llama_sampler * smpl = llama_sampler_chain_init(sparams);
llama_sampler_chain_add(smpl, llama_sampler_init_top_k(params.sampling.top_k));
llama_sampler_chain_add(smpl, llama_sampler_init_top_p(params.sampling.top_p, params.sampling.min_keep));
llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sampling.temp));
llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sampling.seed));
if (ctx == NULL) {
LOG_ERR("%s: error: failed to create the llama_context\n" , __func__);<fim_middle>
ctx_params.n_ctx = n_kv_req;
ctx_params.n_batch = std::max(n_predict, n_parallel);<fim_end>
|
llama_model_params model_params = common_model_params_to_llama(params);
llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params);
if (model == NULL) {
LOG_ERR("%s: error: unable to load model\n" , __func__);
return 1;
}
const llama_vocab * vocab = llama_model_get_vocab(model);
// tokenize the prompt
std::vector<llama_token> tokens_list;
tokens_list = common_tokenize(vocab, params.prompt, true);
const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel;
// initialize the context
llama_context_params ctx_params = common_context_params_to_llama(params);
|
ctx_params.n_ctx = n_kv_req;
ctx_params.n_batch = std::max(n_predict, n_parallel);
|
llama_context * ctx = llama_init_from_model(model, ctx_params);
auto sparams = llama_sampler_chain_default_params();
sparams.no_perf = false;
llama_sampler * smpl = llama_sampler_chain_init(sparams);
llama_sampler_chain_add(smpl, llama_sampler_init_top_k(params.sampling.top_k));
llama_sampler_chain_add(smpl, llama_sampler_init_top_p(params.sampling.top_p, params.sampling.min_keep));
llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sampling.temp));
llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sampling.seed));
if (ctx == NULL) {
LOG_ERR("%s: error: failed to create the llama_context\n" , __func__);
|
random
|
<fim_prefix>lear_selection"), &AnimationBezierTrackEdit::_clear_selection);
ClassDB::bind_method(D_METHOD("_clear_selection_for_anim"), &AnimationBezierTrackEdit::_clear_selection_for_anim);
ClassDB::bind_method(D_METHOD("_select_at_anim"), &AnimationBezierTrackEdit::_select_at_anim);
ClassDB::bind_method(D_METHOD("_update_hidden_tracks_after"), &AnimationBezierTrackEdit::_update_hidden_tracks_after);
ClassDB::bind_method(D_METHOD("_update_locked_tracks_after"), &AnimationBezierTrackEdit::_update_locked_tracks_after);
ClassDB::bind_method(D_METHOD("_bezier_track_insert_key_at_anim"), &AnimationBezierTrackEdit::_bezier_track_insert_key_at_anim, DEFVAL(Animation::HANDLE_SET_MODE_NONE));
ADD_SIGNAL(<fim_suffix>);
ADD_SIGNAL(MethodInfo("deselect_key", PropertyInfo(Variant::INT, "index"), PropertyInfo(Variant::INT, "track")));
ADD_SIGNAL(MethodInfo("clear_selection"));
ADD_SIGNAL(MethodInfo("timeline_changed", PropertyInfo(Variant::FLOAT, "position"), PropertyInfo(Variant::BOOL, "timeline_only")));
}
AnimationBezierTrackEdit::AnimationBezierTrackEdit() {
panner.instantiate();
panner->set_callbacks(callable_mp(this, &AnimationBezierTrackEdit::_pan_callback), callable_mp(this, &AnimationBezierTrackEdit::_zoom_callback));
play_position = memnew(Control);
play_position->set_mouse_filter(MOUSE_FILTER_PASS);
add_child(play_position);
play_position->set_anchors_and_offsets_preset(PRESET_FULL_REC<fim_middle>MethodInfo("select_key", PropertyInfo(Variant::INT, "index"), PropertyInfo(Variant::BOOL, "single"), PropertyInfo(Variant::INT, "track"))<fim_end>
|
lear_selection"), &AnimationBezierTrackEdit::_clear_selection);
ClassDB::bind_method(D_METHOD("_clear_selection_for_anim"), &AnimationBezierTrackEdit::_clear_selection_for_anim);
ClassDB::bind_method(D_METHOD("_select_at_anim"), &AnimationBezierTrackEdit::_select_at_anim);
ClassDB::bind_method(D_METHOD("_update_hidden_tracks_after"), &AnimationBezierTrackEdit::_update_hidden_tracks_after);
ClassDB::bind_method(D_METHOD("_update_locked_tracks_after"), &AnimationBezierTrackEdit::_update_locked_tracks_after);
ClassDB::bind_method(D_METHOD("_bezier_track_insert_key_at_anim"), &AnimationBezierTrackEdit::_bezier_track_insert_key_at_anim, DEFVAL(Animation::HANDLE_SET_MODE_NONE));
ADD_SIGNAL(
|
MethodInfo("select_key", PropertyInfo(Variant::INT, "index"), PropertyInfo(Variant::BOOL, "single"), PropertyInfo(Variant::INT, "track"))
|
);
ADD_SIGNAL(MethodInfo("deselect_key", PropertyInfo(Variant::INT, "index"), PropertyInfo(Variant::INT, "track")));
ADD_SIGNAL(MethodInfo("clear_selection"));
ADD_SIGNAL(MethodInfo("timeline_changed", PropertyInfo(Variant::FLOAT, "position"), PropertyInfo(Variant::BOOL, "timeline_only")));
}
AnimationBezierTrackEdit::AnimationBezierTrackEdit() {
panner.instantiate();
panner->set_callbacks(callable_mp(this, &AnimationBezierTrackEdit::_pan_callback), callable_mp(this, &AnimationBezierTrackEdit::_zoom_callback));
play_position = memnew(Control);
play_position->set_mouse_filter(MOUSE_FILTER_PASS);
add_child(play_position);
play_position->set_anchors_and_offsets_preset(PRESET_FULL_REC
|
ast_based
|
<fim_prefix>
offset_n += moving_selection_offset.x;
height_n += moving_selection_offset.y;
} else if (scaling_selection) {
offset_n += -scaling_selection_offset.x + (offset_n - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
height_n += -scaling_selection_offset.y + (height_n - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
if (moving_inserted_key && moving_selection_from_track == p_track) {
if (moving_selection_from_key == i) {
Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(p_track, i);
if (handle_mode != Animation::HANDLE_MODE_FREE) {
float offset_p = offset;
float height_p = height;
<fim_suffix>
animation->bezier_track_calculate_handles(offset, offset_p, height_p, offset_n, height_n, handle_mode, Animation::HANDLE_SET_MODE_AUTO, nullptr, &out_handle);
}
} else if (moving_selection_from_key == i_n) {
Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(p_track, i_n);
if (handle_mode != Animation::HANDLE_MODE_FREE) {
float offset_nn = offset_n;
float height_nn = height_n;
if (E->next()->next()) {
int i_nn = E->next()->next()->get();
offset_nn = animation->track_get_key_time(p_track, i_nn);
height_nn = animation->bezier_track_get_key_value(p_track, i_nn);
}
animation->bezier_track_calculate_han<fim_middle>if (E->prev()) {
int i_p = E->prev()->get();
offset_p = animation->track_get_key_time(p_track, i_p);
height_p = animation->bezier_track_get_key_value(p_track, i_p);
}<fim_end>
|
offset_n += moving_selection_offset.x;
height_n += moving_selection_offset.y;
} else if (scaling_selection) {
offset_n += -scaling_selection_offset.x + (offset_n - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
height_n += -scaling_selection_offset.y + (height_n - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
if (moving_inserted_key && moving_selection_from_track == p_track) {
if (moving_selection_from_key == i) {
Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(p_track, i);
if (handle_mode != Animation::HANDLE_MODE_FREE) {
float offset_p = offset;
float height_p = height;
|
if (E->prev()) {
int i_p = E->prev()->get();
offset_p = animation->track_get_key_time(p_track, i_p);
height_p = animation->bezier_track_get_key_value(p_track, i_p);
}
|
animation->bezier_track_calculate_handles(offset, offset_p, height_p, offset_n, height_n, handle_mode, Animation::HANDLE_SET_MODE_AUTO, nullptr, &out_handle);
}
} else if (moving_selection_from_key == i_n) {
Animation::HandleMode handle_mode = animation->bezier_track_get_key_handle_mode(p_track, i_n);
if (handle_mode != Animation::HANDLE_MODE_FREE) {
float offset_nn = offset_n;
float height_nn = height_n;
if (E->next()->next()) {
int i_nn = E->next()->next()->get();
offset_nn = animation->track_get_key_time(p_track, i_nn);
height_nn = animation->bezier_track_get_key_value(p_track, i_nn);
}
animation->bezier_track_calculate_han
|
ast_based
|
<fim_prefix> LOG_INF("%s: Allocating [%d] x [%d] = [%d] float space for w->wcls\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim);
}
}
catch (std::length_error &) {
die("Invalid configuration. Failed to allocate memory for weights");
}
}
static int checkpoint_init_weights(TransformerWeights * w, const Config * p, FILE * f, bool shared_weights) {
if (fread(w->token_embedding_table.data(), sizeof(float), w->token_embedding_table.size(), f) != w->token_embedding_table.size()) return 1;
if (fread(w->rms_att_weight.data(), sizeof(float), w->rms_att_weight.size(), f) != w->rms_att_weight.size()) return 1;<fim_suffix> if (fread(w->wv.data(), sizeof(float), w->wv.size(), f) != w->wv.size()) return 1;
if (fread(w->wo.data(), sizeof(float), w->wo.size(), f) != w->wo.size()) return 1;
if (fread(w->rms_ffn_weight.data(), sizeof(float), w->rms_ffn_weight.size(), f) != w->rms_ffn_weight.size()) return 1;
if (fread(w->w1.data(), sizeof(float), w->w1.size(), f) != w->w1.size()) return 1;
if (fread(w->w2.data(), sizeof(float), w->w2.size(), f) != w->w2.size()) return 1;
if (fread(w->w3.data(), sizeof(float), w->w3.size(), f) != w->w3.size()) return 1;
if (fread(w->rms_final_weight.data(), sizeof(float), w->rms_final_weight.size(), f) != w->rms_final_weight.size()) return 1;
<fim_middle> if (fread(w->wq.data(), sizeof(float), w->wq.size(), f) != w->wq.size()) return 1;
if (fread(w->wk.data(), sizeof(float), w->wk.size(), f) != w->wk.size()) return 1;<fim_end>
|
LOG_INF("%s: Allocating [%d] x [%d] = [%d] float space for w->wcls\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim);
}
}
catch (std::length_error &) {
die("Invalid configuration. Failed to allocate memory for weights");
}
}
static int checkpoint_init_weights(TransformerWeights * w, const Config * p, FILE * f, bool shared_weights) {
if (fread(w->token_embedding_table.data(), sizeof(float), w->token_embedding_table.size(), f) != w->token_embedding_table.size()) return 1;
if (fread(w->rms_att_weight.data(), sizeof(float), w->rms_att_weight.size(), f) != w->rms_att_weight.size()) return 1;
|
if (fread(w->wq.data(), sizeof(float), w->wq.size(), f) != w->wq.size()) return 1;
if (fread(w->wk.data(), sizeof(float), w->wk.size(), f) != w->wk.size()) return 1;
|
if (fread(w->wv.data(), sizeof(float), w->wv.size(), f) != w->wv.size()) return 1;
if (fread(w->wo.data(), sizeof(float), w->wo.size(), f) != w->wo.size()) return 1;
if (fread(w->rms_ffn_weight.data(), sizeof(float), w->rms_ffn_weight.size(), f) != w->rms_ffn_weight.size()) return 1;
if (fread(w->w1.data(), sizeof(float), w->w1.size(), f) != w->w1.size()) return 1;
if (fread(w->w2.data(), sizeof(float), w->w2.size(), f) != w->w2.size()) return 1;
if (fread(w->w3.data(), sizeof(float), w->w3.size(), f) != w->w3.size()) return 1;
if (fread(w->rms_final_weight.data(), sizeof(float), w->rms_final_weight.size(), f) != w->rms_final_weight.size()) return 1;
|
random
|
<fim_prefix> accesskit_node_set_bold(ae->node);
}
if (TS->font_get_style(font_rid).has_flag(TextServer::FONT_ITALIC)) {
accesskit_node_set_italic(ae->node);
}
accesskit_node_set_font_weight(ae->node, TS->font_get_weight(font_rid));
}
accesskit_node_set_font_size(ae->node, TS->shaped_get_run_font_size(p_shaped_text, i));
CharString language = TS->shaped_get_run_language(p_shaped_text, i).utf8();
if (language.length() > 0) {
accesskit_node_set_language(ae->node, language.ptr());
}
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT);
accesskit_rect rect;
rect.x0 = run_off_x;
rect.y0 = 0;<fim_suffix> accesskit_node_add_action(ae->node, ACCESSKIT_ACTION_SCROLL_INTO_VIEW);
run_off_x += size_x;
}
{
// Add "\n" at the end.
AccessibilityElement *ae = memnew(AccessibilityElement);
ae->role = ACCESSKIT_ROLE_TEXT_RUN;
ae->window_id = parent_ae->window_id;
ae->parent = root_rid;
ae->run = Vector3i(full_range.y, full_range.y, run_count);
ae->node = accesskit_node_new(ae->role);
text_elements.push_back(ae);
Vector<uint8_t> char_lengths;
char_lengths.push_back(1);
accesskit_node_set_value(ae->node, "\n");
accesskit_node_set_character_lengths(ae->node, char_lengths.size(), char_lengths.ptr());
Vector<float> char_positions;
Vector<float> char_widths;<fim_middle> rect.x1 = run_off_x + size_x;
rect.y1 = text_height;
accesskit_node_set_bounds(ae->node, rect);<fim_end>
|
accesskit_node_set_bold(ae->node);
}
if (TS->font_get_style(font_rid).has_flag(TextServer::FONT_ITALIC)) {
accesskit_node_set_italic(ae->node);
}
accesskit_node_set_font_weight(ae->node, TS->font_get_weight(font_rid));
}
accesskit_node_set_font_size(ae->node, TS->shaped_get_run_font_size(p_shaped_text, i));
CharString language = TS->shaped_get_run_language(p_shaped_text, i).utf8();
if (language.length() > 0) {
accesskit_node_set_language(ae->node, language.ptr());
}
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT);
accesskit_rect rect;
rect.x0 = run_off_x;
rect.y0 = 0;
|
rect.x1 = run_off_x + size_x;
rect.y1 = text_height;
accesskit_node_set_bounds(ae->node, rect);
|
accesskit_node_add_action(ae->node, ACCESSKIT_ACTION_SCROLL_INTO_VIEW);
run_off_x += size_x;
}
{
// Add "\n" at the end.
AccessibilityElement *ae = memnew(AccessibilityElement);
ae->role = ACCESSKIT_ROLE_TEXT_RUN;
ae->window_id = parent_ae->window_id;
ae->parent = root_rid;
ae->run = Vector3i(full_range.y, full_range.y, run_count);
ae->node = accesskit_node_new(ae->role);
text_elements.push_back(ae);
Vector<uint8_t> char_lengths;
char_lengths.push_back(1);
accesskit_node_set_value(ae->node, "\n");
accesskit_node_set_character_lengths(ae->node, char_lengths.size(), char_lengths.ptr());
Vector<float> char_positions;
Vector<float> char_widths;
|
random
|
<fim_prefix> grpc_channel_destroy(c_channel_);
CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_relaxed);
if (callback_cq != nullptr) {
if (grpc_iomgr_run_in_background()) {
// gRPC-core provides the backing needed for the preferred CQ type
callback_cq->Shutdown();
} else {
CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq);
}
}
}
namespace {
inline grpc_slice SliceFromArray(const char* arr, size_t len) {
return grpc_slice_from_copied_buffer(arr, len);
}
std::string GetChannelInfoField(grpc_channel* channel,
grpc_channel_info* channel_info,
char*** channel_info_field) {<fim_suffix> if (value == nullptr) return "";
std::string result = value;
gpr_free(value);
return result;
}
} // namespace
std::string Channel::GetLoadBalancingPolicyName() const {
grpc_channel_info channel_info;
return GetChannelInfoField(c_channel_, &channel_info,
&channel_info.lb_policy_name);
}
std::string Channel::GetServiceConfigJSON() const {
grpc_channel_info channel_info;
return GetChannelInfoField(c_channel_, &channel_info,
&channel_info.service_config_json);
}
namespace experimental {
void ChannelResetConnectionBackoff(Channel* channel) {
grpc_channel_reset_connect_backoff(channel->c_channel_);
}
<fim_middle> char* value = nullptr;
memset(channel_info, 0, sizeof(*channel_info));
*channel_info_field = &value;
grpc_channel_get_info(channel, channel_info);<fim_end>
|
grpc_channel_destroy(c_channel_);
CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_relaxed);
if (callback_cq != nullptr) {
if (grpc_iomgr_run_in_background()) {
// gRPC-core provides the backing needed for the preferred CQ type
callback_cq->Shutdown();
} else {
CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq);
}
}
}
namespace {
inline grpc_slice SliceFromArray(const char* arr, size_t len) {
return grpc_slice_from_copied_buffer(arr, len);
}
std::string GetChannelInfoField(grpc_channel* channel,
grpc_channel_info* channel_info,
char*** channel_info_field) {
|
char* value = nullptr;
memset(channel_info, 0, sizeof(*channel_info));
*channel_info_field = &value;
grpc_channel_get_info(channel, channel_info);
|
if (value == nullptr) return "";
std::string result = value;
gpr_free(value);
return result;
}
} // namespace
std::string Channel::GetLoadBalancingPolicyName() const {
grpc_channel_info channel_info;
return GetChannelInfoField(c_channel_, &channel_info,
&channel_info.lb_policy_name);
}
std::string Channel::GetServiceConfigJSON() const {
grpc_channel_info channel_info;
return GetChannelInfoField(c_channel_, &channel_info,
&channel_info.service_config_json);
}
namespace experimental {
void ChannelResetConnectionBackoff(Channel* channel) {
grpc_channel_reset_connect_backoff(channel->c_channel_);
}
|
random
|
<fim_prefix> =*/ n_tensors*ggml_tensor_overhead(),
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
};
ggml_context * buft_ctx = ggml_init(params);
if (!buft_ctx) {
return nullptr;
}
ctx_map[buft] = buft_ctx;
adapter.ctxs.emplace_back(buft_ctx);
return buft_ctx;
};
return it->second;
};
// bundle lora_a and lora_b into pairs
std::map<std::string, llama_adapter_lora_weight> ab_map;
auto str_endswith = [](const std::string & str, const std::string & suffix) {
return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), <fim_suffix>, suffix) == 0;
};
for (ggml_tensor * cur = ggml_get_first_tensor(ctx.get()); cur; cur = ggml_get_next_tensor(ctx.get(), cur)) {
std::string name(cur->name);
if (str_endswith(name, ".lora_a")) {
replace_all(name, ".lora_a", "");
if (ab_map.find(name) == ab_map.end()) {
ab_map[name] = llama_adapter_lora_weight(cur, nullptr);
} else {
ab_map[name].a = cur;
}
} else if (str_endswith(name, ".lora_b")) {
replace_all(name, ".lora_b", "");
if (ab_map.find(name) == ab_map.end()) {
ab_map[name] = llama_adapter_lora_weight(nullptr, cur);
}<fim_middle>suffix.size()<fim_end>
|
=*/ n_tensors*ggml_tensor_overhead(),
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
};
ggml_context * buft_ctx = ggml_init(params);
if (!buft_ctx) {
return nullptr;
}
ctx_map[buft] = buft_ctx;
adapter.ctxs.emplace_back(buft_ctx);
return buft_ctx;
};
return it->second;
};
// bundle lora_a and lora_b into pairs
std::map<std::string, llama_adapter_lora_weight> ab_map;
auto str_endswith = [](const std::string & str, const std::string & suffix) {
return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(),
|
suffix.size()
|
, suffix) == 0;
};
for (ggml_tensor * cur = ggml_get_first_tensor(ctx.get()); cur; cur = ggml_get_next_tensor(ctx.get(), cur)) {
std::string name(cur->name);
if (str_endswith(name, ".lora_a")) {
replace_all(name, ".lora_a", "");
if (ab_map.find(name) == ab_map.end()) {
ab_map[name] = llama_adapter_lora_weight(cur, nullptr);
} else {
ab_map[name].a = cur;
}
} else if (str_endswith(name, ".lora_b")) {
replace_all(name, ".lora_b", "");
if (ab_map.find(name) == ab_map.end()) {
ab_map[name] = llama_adapter_lora_weight(nullptr, cur);
}
|
ast_based
|
<fim_prefix>}
/**
* Get textlines and strips of image regions as a leptonica-style Boxa, Pixa
* pair, in reading order. Enables downstream handling of non-rectangular
* regions.
* Can be called before or after Recognize.
* If blockids is not nullptr, the block-id of each line is also returned as an
* array of one element per line. delete [] after use.
*/
Boxa *TessBaseAPI::GetStrips(Pixa **pixa, int **blockids) {
return GetComponentImages(RIL_TEXTLINE, false, pixa, blockids);
}
/**
* Get the words as a leptonica-style
* Boxa, Pixa pair, in reading order.
* Can be called before or after Recognize.
*/<fim_suffix>}
/**
* Gets the individual connected (text) components (created
 * after the page segmentation step, but before recognition)
* as a leptonica-style Boxa, Pixa pair, in reading order.
* Can be called before or after Recognize.
*/
Boxa *TessBaseAPI::GetConnectedComponents(Pixa **pixa) {
return GetComponentImages(RIL_SYMBOL, true, pixa, nullptr);
}
/**
* Get the given level kind of components (block, textline, word etc.) as a
* leptonica-style Boxa, Pixa pair, in reading order.
* Can be called before or after Recognize.
* If blockids is not nullptr, the block-id of each component is also returned
* as an array of one element per component. delete [] after use.<fim_middle>Boxa *TessBaseAPI::GetWords(Pixa **pixa) {
return GetComponentImages(RIL_WORD, true, pixa, nullptr);<fim_end>
|
}
/**
* Get textlines and strips of image regions as a leptonica-style Boxa, Pixa
* pair, in reading order. Enables downstream handling of non-rectangular
* regions.
* Can be called before or after Recognize.
* If blockids is not nullptr, the block-id of each line is also returned as an
* array of one element per line. delete [] after use.
*/
Boxa *TessBaseAPI::GetStrips(Pixa **pixa, int **blockids) {
return GetComponentImages(RIL_TEXTLINE, false, pixa, blockids);
}
/**
* Get the words as a leptonica-style
* Boxa, Pixa pair, in reading order.
* Can be called before or after Recognize.
*/
|
Boxa *TessBaseAPI::GetWords(Pixa **pixa) {
return GetComponentImages(RIL_WORD, true, pixa, nullptr);
|
}
/**
* Gets the individual connected (text) components (created
 * after the page segmentation step, but before recognition)
* as a leptonica-style Boxa, Pixa pair, in reading order.
* Can be called before or after Recognize.
*/
Boxa *TessBaseAPI::GetConnectedComponents(Pixa **pixa) {
return GetComponentImages(RIL_SYMBOL, true, pixa, nullptr);
}
/**
* Get the given level kind of components (block, textline, word etc.) as a
* leptonica-style Boxa, Pixa pair, in reading order.
* Can be called before or after Recognize.
* If blockids is not nullptr, the block-id of each component is also returned
* as an array of one element per component. delete [] after use.
|
random
|
<fim_prefix> if (bd->MouseCursorInvisible)
{
al_destroy_mouse_cursor(bd->MouseCursorInvisible);
bd->MouseCursorInvisible = nullptr;
}
}
#if ALLEGRO_HAS_CLIPBOARD
static const char* ImGui_ImplAllegro5_GetClipboardText(ImGuiContext*)
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
if (bd->ClipboardTextData)
al_free(bd->ClipboardTextData);
bd->ClipboardTextData = al_get_clipboard_text(bd->Display);
return bd->ClipboardTextData;
}
static void ImGui_ImplAllegro5_SetClipboardText(ImGuiContext*, const char* text)
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
al_set_clipboard_text(bd->Display, text);
}<fim_suffix>ImGuiKey ImGui_ImplAllegro5_KeyCodeToImGuiKey(int key_code);
ImGuiKey ImGui_ImplAllegro5_KeyCodeToImGuiKey(int key_code)
{
switch (key_code)
{
case ALLEGRO_KEY_TAB: return ImGuiKey_Tab;
case ALLEGRO_KEY_LEFT: return ImGuiKey_LeftArrow;
case ALLEGRO_KEY_RIGHT: return ImGuiKey_RightArrow;
case ALLEGRO_KEY_UP: return ImGuiKey_UpArrow;
case ALLEGRO_KEY_DOWN: return ImGuiKey_DownArrow;
case ALLEGRO_KEY_PGUP: return ImGuiKey_PageUp;
case ALLEGRO_KEY_PGDN: return ImGuiKey_PageDown;
case ALLEGRO_KEY_HOME: return ImGuiKey_Home;
case ALLEGRO_KEY_END: return ImGuiKey_End;<fim_middle>#endif
// Not static to allow third-party code to use that if they want to (but undocumented)<fim_end>
|
if (bd->MouseCursorInvisible)
{
al_destroy_mouse_cursor(bd->MouseCursorInvisible);
bd->MouseCursorInvisible = nullptr;
}
}
#if ALLEGRO_HAS_CLIPBOARD
static const char* ImGui_ImplAllegro5_GetClipboardText(ImGuiContext*)
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
if (bd->ClipboardTextData)
al_free(bd->ClipboardTextData);
bd->ClipboardTextData = al_get_clipboard_text(bd->Display);
return bd->ClipboardTextData;
}
static void ImGui_ImplAllegro5_SetClipboardText(ImGuiContext*, const char* text)
{
ImGui_ImplAllegro5_Data* bd = ImGui_ImplAllegro5_GetBackendData();
al_set_clipboard_text(bd->Display, text);
}
|
#endif
// Not static to allow third-party code to use that if they want to (but undocumented)
|
ImGuiKey ImGui_ImplAllegro5_KeyCodeToImGuiKey(int key_code);
ImGuiKey ImGui_ImplAllegro5_KeyCodeToImGuiKey(int key_code)
{
switch (key_code)
{
case ALLEGRO_KEY_TAB: return ImGuiKey_Tab;
case ALLEGRO_KEY_LEFT: return ImGuiKey_LeftArrow;
case ALLEGRO_KEY_RIGHT: return ImGuiKey_RightArrow;
case ALLEGRO_KEY_UP: return ImGuiKey_UpArrow;
case ALLEGRO_KEY_DOWN: return ImGuiKey_DownArrow;
case ALLEGRO_KEY_PGUP: return ImGuiKey_PageUp;
case ALLEGRO_KEY_PGDN: return ImGuiKey_PageDown;
case ALLEGRO_KEY_HOME: return ImGuiKey_Home;
case ALLEGRO_KEY_END: return ImGuiKey_End;
|
random
|
<fim_prefix>is_valid()) {
if (p_request->data.has_value) {
switch (p_request->data.value.tag) {
case ACCESSKIT_ACTION_DATA_CUSTOM_ACTION: {
rq_data = p_request->data.value.custom_action;
} break;
case ACCESSKIT_ACTION_DATA_VALUE: {
rq_data = String::utf8(p_request->data.value.value);
} break;
case ACCESSKIT_ACTION_DATA_NUMERIC_VALUE: {
rq_data = p_request->data.value.numeric_value;
} break;
case ACCESSKIT_ACTION_DATA_SCROLL_HINT: {
switch (p_request->data.value.scroll_hint) {
case ACCESSKIT_SCROLL_HINT_TOP_LEFT: {
rq_data = DisplayServer::SCROLL_HINT_TOP_LEFT;
} break;
case ACCESSKIT_SCROLL_HINT_BOTTOM_RIGHT: <fim_suffix> break;
case ACCESSKIT_SCROLL_HINT_TOP_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_TOP_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_BOTTOM_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_BOTTOM_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_LEFT_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_LEFT_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_RIGHT_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_RIGHT_EDGE;
} break;
default:
break;
}
} break;
case ACCESSKIT_ACTION_DATA_SCROLL_UNIT: {
if (p_request->data.value.scroll_unit == ACCESSKIT_SCROLL_UNIT_ITEM) {
rq_data = DisplayServer::SCR<fim_middle>{
rq_data = DisplayServer::SCROLL_HINT_BOTTOM_RIGHT;
}<fim_end>
|
is_valid()) {
if (p_request->data.has_value) {
switch (p_request->data.value.tag) {
case ACCESSKIT_ACTION_DATA_CUSTOM_ACTION: {
rq_data = p_request->data.value.custom_action;
} break;
case ACCESSKIT_ACTION_DATA_VALUE: {
rq_data = String::utf8(p_request->data.value.value);
} break;
case ACCESSKIT_ACTION_DATA_NUMERIC_VALUE: {
rq_data = p_request->data.value.numeric_value;
} break;
case ACCESSKIT_ACTION_DATA_SCROLL_HINT: {
switch (p_request->data.value.scroll_hint) {
case ACCESSKIT_SCROLL_HINT_TOP_LEFT: {
rq_data = DisplayServer::SCROLL_HINT_TOP_LEFT;
} break;
case ACCESSKIT_SCROLL_HINT_BOTTOM_RIGHT:
|
{
rq_data = DisplayServer::SCROLL_HINT_BOTTOM_RIGHT;
}
|
break;
case ACCESSKIT_SCROLL_HINT_TOP_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_TOP_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_BOTTOM_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_BOTTOM_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_LEFT_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_LEFT_EDGE;
} break;
case ACCESSKIT_SCROLL_HINT_RIGHT_EDGE: {
rq_data = DisplayServer::SCROLL_HINT_RIGHT_EDGE;
} break;
default:
break;
}
} break;
case ACCESSKIT_ACTION_DATA_SCROLL_UNIT: {
if (p_request->data.value.scroll_unit == ACCESSKIT_SCROLL_UNIT_ITEM) {
rq_data = DisplayServer::SCR
|
ast_based
|
<fim_prefix> ac_focus = (accesskit_node_id)singleton->focus.get_id();
}
accesskit_tree_update *tree_update = (update_size > 0) ? accesskit_tree_update_with_capacity_and_focus(update_size, ac_focus) : accesskit_tree_update_with_focus(ac_focus);
for (const RID &rid : wd.update) {
AccessibilityElement *ae = singleton->rid_owner.get_or_null(rid);
if (ae && ae->node) {
for (const RID &child_rid : ae->children) {
accesskit_node_push_child(ae->node, (accesskit_node_id)child_rid.get_id());
}
accesskit_tree_update_push_node(tree_update, (accesskit_node_id)rid.get_id(), ae->node);
ae->node = nullptr;
}
}
wd.update.clear();
return tree_update;
}
<fim_suffix> update_cb = p_callable;
for (KeyValue<DisplayServer::WindowID, WindowData> &window : windows) {
#ifdef WINDOWS_ENABLED
accesskit_windows_queued_events *events = accesskit_windows_subclassing_adapter_update_if_active(window.value.adapter, _accessibility_build_tree_update, (void *)(size_t)window.key);
if (events) {
accesskit_windows_queued_events_raise(events);
}
#endif
#ifdef MACOS_ENABLED
accesskit_macos_queued_events *events = accesskit_macos_subclassing_adapter_update_if_active(window.value.adapter, _accessibility_build_tree_update, (void *)(size_t)window.key);
if (events) {
accesskit_macos_queued_events_raise(events);
}
#endif
#ifdef LINUXBSD_ENABLED<fim_middle>void AccessibilityDriverAccessKit::accessibility_update_if_active(const Callable &p_callable) {
ERR_FAIL_COND(!p_callable.is_valid());<fim_end>
|
ac_focus = (accesskit_node_id)singleton->focus.get_id();
}
accesskit_tree_update *tree_update = (update_size > 0) ? accesskit_tree_update_with_capacity_and_focus(update_size, ac_focus) : accesskit_tree_update_with_focus(ac_focus);
for (const RID &rid : wd.update) {
AccessibilityElement *ae = singleton->rid_owner.get_or_null(rid);
if (ae && ae->node) {
for (const RID &child_rid : ae->children) {
accesskit_node_push_child(ae->node, (accesskit_node_id)child_rid.get_id());
}
accesskit_tree_update_push_node(tree_update, (accesskit_node_id)rid.get_id(), ae->node);
ae->node = nullptr;
}
}
wd.update.clear();
return tree_update;
}
|
void AccessibilityDriverAccessKit::accessibility_update_if_active(const Callable &p_callable) {
ERR_FAIL_COND(!p_callable.is_valid());
|
update_cb = p_callable;
for (KeyValue<DisplayServer::WindowID, WindowData> &window : windows) {
#ifdef WINDOWS_ENABLED
accesskit_windows_queued_events *events = accesskit_windows_subclassing_adapter_update_if_active(window.value.adapter, _accessibility_build_tree_update, (void *)(size_t)window.key);
if (events) {
accesskit_windows_queued_events_raise(events);
}
#endif
#ifdef MACOS_ENABLED
accesskit_macos_queued_events *events = accesskit_macos_subclassing_adapter_update_if_active(window.value.adapter, _accessibility_build_tree_update, (void *)(size_t)window.key);
if (events) {
accesskit_macos_queued_events_raise(events);
}
#endif
#ifdef LINUXBSD_ENABLED
|
random
|
<fim_prefix> ev->set_canceled(p_canceled);
ev->set_position(touch[i].pos);
ev->set_double_tap(p_double_tap);
Input::get_singleton()->parse_input_event(ev);
}
}
}
void AndroidInputHandler::_release_all_touch() {
_parse_all_touch(false, false);
touch.clear();
}
void AndroidInputHandler::process_touch_event(int p_event, int p_pointer, const Vector<TouchPos> &p_points, bool p_double_tap) {
switch (p_event) {
case AMOTION_EVENT_ACTION_DOWN: { //gesture begin
// Release any remaining touches or mouse event
_release_mouse_event_info();
_release_all_touch();
touch.resize(p_points.size());
for (int i = 0; i < p_points.size(); i++) {
touch.write[i].id = p_points[i].id;
<fim_suffix>
touch.write[i].tilt = p_points[i].tilt;
}
//send touch
_parse_all_touch(true, false, p_double_tap);
} break;
case AMOTION_EVENT_ACTION_MOVE: { //motion
if (touch.size() != p_points.size()) {
return;
}
for (int i = 0; i < touch.size(); i++) {
int idx = -1;
for (int j = 0; j < p_points.size(); j++) {
if (touch[i].id == p_points[j].id) {
idx = j;
break;
}
}
ERR_CONTINUE(idx == -1);
if (touch[i].pos == p_points[idx].pos) {
continue; // Don't move unnecessarily.
}
Ref<InputEventScreenDrag> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_position(p_points[idx].pos);
ev->set_relativ<fim_middle>touch.write[i].pos = p_points[i].pos;
touch.write[i].pressure = p_points[i].pressure;<fim_end>
|
ev->set_canceled(p_canceled);
ev->set_position(touch[i].pos);
ev->set_double_tap(p_double_tap);
Input::get_singleton()->parse_input_event(ev);
}
}
}
void AndroidInputHandler::_release_all_touch() {
_parse_all_touch(false, false);
touch.clear();
}
void AndroidInputHandler::process_touch_event(int p_event, int p_pointer, const Vector<TouchPos> &p_points, bool p_double_tap) {
switch (p_event) {
case AMOTION_EVENT_ACTION_DOWN: { //gesture begin
// Release any remaining touches or mouse event
_release_mouse_event_info();
_release_all_touch();
touch.resize(p_points.size());
for (int i = 0; i < p_points.size(); i++) {
touch.write[i].id = p_points[i].id;
|
touch.write[i].pos = p_points[i].pos;
touch.write[i].pressure = p_points[i].pressure;
|
touch.write[i].tilt = p_points[i].tilt;
}
//send touch
_parse_all_touch(true, false, p_double_tap);
} break;
case AMOTION_EVENT_ACTION_MOVE: { //motion
if (touch.size() != p_points.size()) {
return;
}
for (int i = 0; i < touch.size(); i++) {
int idx = -1;
for (int j = 0; j < p_points.size(); j++) {
if (touch[i].id == p_points[j].id) {
idx = j;
break;
}
}
ERR_CONTINUE(idx == -1);
if (touch[i].pos == p_points[idx].pos) {
continue; // Don't move unnecessarily.
}
Ref<InputEventScreenDrag> ev;
ev.instantiate();
ev->set_index(touch[i].id);
ev->set_position(p_points[idx].pos);
ev->set_relativ
|
ast_based
|
<fim_prefix> E.value;
// Names and icon.
{
NodePath path = animation->track_get_path(tracks[0]);
Node *node = nullptr;
if (root && root->has_node(path)) {
node = root->get_node(path);
}
String text;
if (node) {
int ofs = 0;
Ref<Texture2D> icon = EditorNode::get_singleton()->get_object_icon(node, "Node");
text = node->get_name();
ofs += h_separation;
TextLine text_buf = TextLine(text, font, font_size);
text_buf.set_width(limit - ofs - icon->get_width() - h_separation);
int h = MAX(text_buf.get_size().y, icon->get_height());
draw_texture(icon, Point2(ofs, vofs + int(h - icon->get_height()) / 2.0));
<fim_suffix>
margin = icon->get_width();
Vector2 string_pos = Point2(ofs, vofs);
string_pos = string_pos.floor();
text_buf.draw(get_canvas_item(), string_pos, color);
vofs += h + v_separation;
track_v_scroll_max += h + v_separation;
}
}
const Color dc = get_theme_color(SNAME("font_disabled_color"), EditorStringName(Editor));
Ref<Texture2D> remove = get_editor_theme_icon(SNAME("Remove"));
float remove_hpos = limit - h_separation - remove->get_width();
Ref<Texture2D> lock = get_editor_theme_icon(SNAME("Lock"));
Ref<Texture2D> unlock = get_editor_theme_icon(SNAME("Unlock"));
float lock_hpos = remove_hpos - h_separation - lock->get_w<fim_middle>ofs += icon->get_width() + h_separation;<fim_end>
|
E.value;
// Names and icon.
{
NodePath path = animation->track_get_path(tracks[0]);
Node *node = nullptr;
if (root && root->has_node(path)) {
node = root->get_node(path);
}
String text;
if (node) {
int ofs = 0;
Ref<Texture2D> icon = EditorNode::get_singleton()->get_object_icon(node, "Node");
text = node->get_name();
ofs += h_separation;
TextLine text_buf = TextLine(text, font, font_size);
text_buf.set_width(limit - ofs - icon->get_width() - h_separation);
int h = MAX(text_buf.get_size().y, icon->get_height());
draw_texture(icon, Point2(ofs, vofs + int(h - icon->get_height()) / 2.0));
|
ofs += icon->get_width() + h_separation;
|
margin = icon->get_width();
Vector2 string_pos = Point2(ofs, vofs);
string_pos = string_pos.floor();
text_buf.draw(get_canvas_item(), string_pos, color);
vofs += h + v_separation;
track_v_scroll_max += h + v_separation;
}
}
const Color dc = get_theme_color(SNAME("font_disabled_color"), EditorStringName(Editor));
Ref<Texture2D> remove = get_editor_theme_icon(SNAME("Remove"));
float remove_hpos = limit - h_separation - remove->get_width();
Ref<Texture2D> lock = get_editor_theme_icon(SNAME("Lock"));
Ref<Texture2D> unlock = get_editor_theme_icon(SNAME("Unlock"));
float lock_hpos = remove_hpos - h_separation - lock->get_w
|
ast_based
|
<fim_prefix>imation::TrackType::TYPE_BEZIER) {
if (i != track && !hidden_tracks.has(i)) {
hidden_tracks.insert(i);
}
}
}
set_animation_and_track(animation, track, read_only);
solo_track = track;
}
queue_redraw();
return;
}
return;
}
}
}
// Check this first, to allow manipulating key handles while ignoring keyframes before scaling/moving.
bool inside_selection_handles_rect = !read_only && selection_handles_rect.has_point(mb->get_position());
// First, check keyframe.
// Command/Control makes it ignore the keyframe, so control point editors can be force-edited.
if (!inside_selection_handles_rect && !<fim_suffix>) {
if (_try_select_at_ui_pos(mb->get_position(), mb->is_shift_pressed(), true)) {
return;
}
}
// Second, check key handles.
for (int i = 0; i < edit_points.size(); i++) {
if (!read_only) {
if (edit_points[i].in_rect.has_point(mb->get_position())) {
moving_handle = -1;
moving_handle_key = edit_points[i].key;
moving_handle_track = edit_points[i].track;
moving_handle_left = animation->bezier_track_get_key_in_handle(edit_points[i].track, edit_points[i].key);
moving_handle_right = animation->bezier_track_get_key_out_handle(edit_points[i].track, edit_points[i].key);
queue_redraw();
return;
}
if (edit_points[i].out_rect.has_point(m<fim_middle>mb->is_command_or_control_pressed()<fim_end>
|
imation::TrackType::TYPE_BEZIER) {
if (i != track && !hidden_tracks.has(i)) {
hidden_tracks.insert(i);
}
}
}
set_animation_and_track(animation, track, read_only);
solo_track = track;
}
queue_redraw();
return;
}
return;
}
}
}
// Check this first, to allow manipulating key handles while ignoring keyframes before scaling/moving.
bool inside_selection_handles_rect = !read_only && selection_handles_rect.has_point(mb->get_position());
// First, check keyframe.
// Command/Control makes it ignore the keyframe, so control point editors can be force-edited.
if (!inside_selection_handles_rect && !
|
mb->is_command_or_control_pressed()
|
) {
if (_try_select_at_ui_pos(mb->get_position(), mb->is_shift_pressed(), true)) {
return;
}
}
// Second, check key handles.
for (int i = 0; i < edit_points.size(); i++) {
if (!read_only) {
if (edit_points[i].in_rect.has_point(mb->get_position())) {
moving_handle = -1;
moving_handle_key = edit_points[i].key;
moving_handle_track = edit_points[i].track;
moving_handle_left = animation->bezier_track_get_key_in_handle(edit_points[i].track, edit_points[i].key);
moving_handle_right = animation->bezier_track_get_key_out_handle(edit_points[i].track, edit_points[i].key);
queue_redraw();
return;
}
if (edit_points[i].out_rect.has_point(m
|
ast_based
|
<fim_prefix> real_t t = animation->track_get_key_time(E->get().first, E->get().second);
if (t < top_time) {
top_time = t;
}
}
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Animation Duplicate Keys"));
List<Pair<int, real_t>> new_selection_values;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t t = animation->track_get_key_time(E->get().first, E->get().second);
real_t insert_pos = p_ofs_valid ? p_ofs : timeline->get_play_position();
if (p_ofs_valid) {
if (editor->snap_keys->is_pressed() && editor->step->get_value() != 0) {<fim_suffix> }
}
real_t dst_time = t + (insert_pos - top_time);
int existing_idx = animation->track_find_key(E->get().first, dst_time, Animation::FIND_MODE_APPROX);
undo_redo->add_do_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, E->get().second), animation->track_get_key_transition(E->get().first, E->get().second));
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, dst_time);
Pair<int, real_t> p;
p.first = E->get().first;
p.second = dst_time;
new_selection_values.push_back(p);
if (existing_idx != -1) {<fim_middle> insert_pos = editor->snap_time(insert_pos);<fim_end>
|
real_t t = animation->track_get_key_time(E->get().first, E->get().second);
if (t < top_time) {
top_time = t;
}
}
EditorUndoRedoManager *undo_redo = EditorUndoRedoManager::get_singleton();
undo_redo->create_action(TTR("Animation Duplicate Keys"));
List<Pair<int, real_t>> new_selection_values;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t t = animation->track_get_key_time(E->get().first, E->get().second);
real_t insert_pos = p_ofs_valid ? p_ofs : timeline->get_play_position();
if (p_ofs_valid) {
if (editor->snap_keys->is_pressed() && editor->step->get_value() != 0) {
|
insert_pos = editor->snap_time(insert_pos);
|
}
}
real_t dst_time = t + (insert_pos - top_time);
int existing_idx = animation->track_find_key(E->get().first, dst_time, Animation::FIND_MODE_APPROX);
undo_redo->add_do_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, E->get().second), animation->track_get_key_transition(E->get().first, E->get().second));
undo_redo->add_undo_method(animation.ptr(), "track_remove_key_at_time", E->get().first, dst_time);
Pair<int, real_t> p;
p.first = E->get().first;
p.second = dst_time;
new_selection_values.push_back(p);
if (existing_idx != -1) {
|
random
|
<fim_prefix> int j = (int)((*pointIt).y / yGridStep);
pointsInCell[i*gridSize + j]++;
}
for(std::vector<cv::Mat>::iterator it = mCalibData->allCharucoCorners.begin(); it != mCalibData->allCharucoCorners.end(); ++it)
for(int l = 0; l < (*it).size[0]; l++) {
int i = (int)((*it).at<float>(l, 0) / xGridStep);
int j = (int)((*it).at<float>(l, 1) / yGridStep);
pointsInCell[i*gridSize + j]++;
}
cv::Mat mean, stdDev;
cv::meanStdDev(pointsInCell, mean, stdDev);
return mean.at<double>(0) / (stdDev.at<double>(0) + 1e-7);
}
calib::calibController::calibController()<fim_suffix>}
calib::calibController::calibController(cv::Ptr<calib::calibrationData> data, int initialFlags, bool autoTuning, int minFramesNum) :
mCalibData(data)
{
mCalibFlags = initialFlags;
mNeedTuning = autoTuning;
mMinFramesNum = minFramesNum;
mConfIntervalsState = false;
mCoverageQualityState = false;
}
void calib::calibController::updateState()
{
if(mCalibData->cameraMatrix.total()) {
const double relErrEps = 0.05;
bool fConfState = false, cConfState = false, dConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(0) / mCalibData->cameraMatrix.at<double>(0,0) < relErrEps &&<fim_middle>{
mCalibFlags = 0;<fim_end>
|
int j = (int)((*pointIt).y / yGridStep);
pointsInCell[i*gridSize + j]++;
}
for(std::vector<cv::Mat>::iterator it = mCalibData->allCharucoCorners.begin(); it != mCalibData->allCharucoCorners.end(); ++it)
for(int l = 0; l < (*it).size[0]; l++) {
int i = (int)((*it).at<float>(l, 0) / xGridStep);
int j = (int)((*it).at<float>(l, 1) / yGridStep);
pointsInCell[i*gridSize + j]++;
}
cv::Mat mean, stdDev;
cv::meanStdDev(pointsInCell, mean, stdDev);
return mean.at<double>(0) / (stdDev.at<double>(0) + 1e-7);
}
calib::calibController::calibController()
|
{
mCalibFlags = 0;
|
}
calib::calibController::calibController(cv::Ptr<calib::calibrationData> data, int initialFlags, bool autoTuning, int minFramesNum) :
mCalibData(data)
{
mCalibFlags = initialFlags;
mNeedTuning = autoTuning;
mMinFramesNum = minFramesNum;
mConfIntervalsState = false;
mCoverageQualityState = false;
}
void calib::calibController::updateState()
{
if(mCalibData->cameraMatrix.total()) {
const double relErrEps = 0.05;
bool fConfState = false, cConfState = false, dConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(0) / mCalibData->cameraMatrix.at<double>(0,0) < relErrEps &&
|
random
|
<fim_prefix> if (wlen > 0) {
word_lengths.push_back(wlen);
total += wlen;
}
prev = words[j];
}
if (total < t.length()) {
word_lengths.push_back(t.length() - total);
}
accesskit_node_set_word_lengths(ae->node, word_lengths.size(), word_lengths.ptr());
// Char widths and positions.
Vector<float> char_positions;
Vector<float> char_widths;
char_positions.resize_initialized(t.length());
float *positions_ptr = char_positions.ptrw();
char_widths.resize_initialized(t.length());
float *widths_ptr = char_widths.ptrw();
float size_x = 0.0;
for (int j = gl_index; j < gl_count; j += gl[j].count) {
if (gl[j].start >= ae->run.y) {
gl_index = j;
break;
}
<fim_suffix> // Graphame advance.
for (int k = 0; k < gl[j].count; k++) {
advance += gl[j + k].advance;
}
int chars = gl[j].end - gl[j].start;
float adv_per_char = advance / (float)chars;
for (int k = 0; k < chars; k++) {
int index = gl[j].start + k - ae->run.x;
ERR_CONTINUE(index < 0 || index >= t.length());
positions_ptr[index] = size_x + adv_per_char * k;
widths_ptr[index] = adv_per_char;
}
size_x += advance * gl[j].repeat;
}
positions_ptr[t.length() - 1] = size_x;
widths_ptr[t.length() - 1] = 1.0;
accesskit_node_set_character_positions(ae->node, char_positions.size(), char_positions.ptr());
accesskit_node_set_character_widths(ae->node, char_widths<fim_middle>float advance = 0.0;<fim_end>
|
if (wlen > 0) {
word_lengths.push_back(wlen);
total += wlen;
}
prev = words[j];
}
if (total < t.length()) {
word_lengths.push_back(t.length() - total);
}
accesskit_node_set_word_lengths(ae->node, word_lengths.size(), word_lengths.ptr());
// Char widths and positions.
Vector<float> char_positions;
Vector<float> char_widths;
char_positions.resize_initialized(t.length());
float *positions_ptr = char_positions.ptrw();
char_widths.resize_initialized(t.length());
float *widths_ptr = char_widths.ptrw();
float size_x = 0.0;
for (int j = gl_index; j < gl_count; j += gl[j].count) {
if (gl[j].start >= ae->run.y) {
gl_index = j;
break;
}
|
float advance = 0.0;
|
 // Grapheme advance.
for (int k = 0; k < gl[j].count; k++) {
advance += gl[j + k].advance;
}
int chars = gl[j].end - gl[j].start;
float adv_per_char = advance / (float)chars;
for (int k = 0; k < chars; k++) {
int index = gl[j].start + k - ae->run.x;
ERR_CONTINUE(index < 0 || index >= t.length());
positions_ptr[index] = size_x + adv_per_char * k;
widths_ptr[index] = adv_per_char;
}
size_x += advance * gl[j].repeat;
}
positions_ptr[t.length() - 1] = size_x;
widths_ptr[t.length() - 1] = 1.0;
accesskit_node_set_character_positions(ae->node, char_positions.size(), char_positions.ptr());
accesskit_node_set_character_widths(ae->node, char_widths
|
ast_based
|
<fim_prefix>es.push_back(p);
if (existing_idx != -1) {
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, existing_idx), animation->track_get_key_transition(E->get().first, existing_idx));
}
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect duplicated.
int i = 0;
for (const Pair<int, real_t> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) <fim_suffix>
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, time, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->add_do_method(this, "queue_redraw");
undo_redo->add_undo_method(this, "queue_redraw");
undo_redo->commit_action();
}
void AnimationBezierTrackEdit::copy_selected_keys(bool p_cut) {
if (selection.is_empty()) {
return;
}
float top_time = 1e10;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
float t = animation->track_get_key_time(E<fim_middle>{
real_t time = animation->track_get_key_time(E->get().first, E->get().second);<fim_end>
|
es.push_back(p);
if (existing_idx != -1) {
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", E->get().first, dst_time, animation->track_get_key_value(E->get().first, existing_idx), animation->track_get_key_transition(E->get().first, existing_idx));
}
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// Reselect duplicated.
int i = 0;
for (const Pair<int, real_t> &E : new_selection_values) {
undo_redo->add_do_method(this, "_select_at_anim", animation, E.first, E.second, i == 0);
i++;
}
i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev())
|
{
real_t time = animation->track_get_key_time(E->get().first, E->get().second);
|
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, time, i == 0);
i++;
}
AnimationPlayerEditor *ape = AnimationPlayerEditor::get_singleton();
if (ape) {
undo_redo->add_do_method(ape, "_animation_update_key_frame");
undo_redo->add_undo_method(ape, "_animation_update_key_frame");
}
undo_redo->add_do_method(this, "queue_redraw");
undo_redo->add_undo_method(this, "queue_redraw");
undo_redo->commit_action();
}
void AnimationBezierTrackEdit::copy_selected_keys(bool p_cut) {
if (selection.is_empty()) {
return;
}
float top_time = 1e10;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
float t = animation->track_get_key_time(E
|
ast_based
|
<fim_prefix>
Vector2 string_pos = Point2(ofs, vofs);
string_pos = string_pos.floor();
text_buf.draw(get_canvas_item(), string_pos, color);
vofs += h + v_separation;
track_v_scroll_max += h + v_separation;
}
}
const Color dc = get_theme_color(SNAME("font_disabled_color"), EditorStringName(Editor));
Ref<Texture2D> remove = get_editor_theme_icon(SNAME("Remove"));
float remove_hpos = limit - h_separation - remove->get_width();
Ref<Texture2D> lock = get_editor_theme_icon(SNAME("Lock"));
Ref<Texture2D> unlock = get_editor_theme_icon(SNAME("Unlock"));
float lock_hpos = remove_hpos - h_separation - lock->get_width();<fim_suffix> float visibility_hpos = lock_hpos - h_separation - visibility_visible->get_width();
Ref<Texture2D> solo = get_editor_theme_icon(SNAME("AudioBusSolo"));
float solo_hpos = visibility_hpos - h_separation - solo->get_width();
float buttons_width = remove->get_width() + lock->get_width() + visibility_visible->get_width() + solo->get_width() + h_separation * 3;
for (int i = 0; i < tracks.size(); ++i) {
// Related track titles.
int current_track = tracks[i];
String path = String(animation->track_get_path(current_track));
path = path.replace_first(base_path, "");
Color cc = color;
TextLine text_buf = TextLine(path, font, font_size);<fim_middle>
Ref<Texture2D> visibility_visible = get_editor_theme_icon(SNAME("GuiVisibilityVisible"));
Ref<Texture2D> visibility_hidden = get_editor_theme_icon(SNAME("GuiVisibilityHidden"));<fim_end>
|
Vector2 string_pos = Point2(ofs, vofs);
string_pos = string_pos.floor();
text_buf.draw(get_canvas_item(), string_pos, color);
vofs += h + v_separation;
track_v_scroll_max += h + v_separation;
}
}
const Color dc = get_theme_color(SNAME("font_disabled_color"), EditorStringName(Editor));
Ref<Texture2D> remove = get_editor_theme_icon(SNAME("Remove"));
float remove_hpos = limit - h_separation - remove->get_width();
Ref<Texture2D> lock = get_editor_theme_icon(SNAME("Lock"));
Ref<Texture2D> unlock = get_editor_theme_icon(SNAME("Unlock"));
float lock_hpos = remove_hpos - h_separation - lock->get_width();
|
Ref<Texture2D> visibility_visible = get_editor_theme_icon(SNAME("GuiVisibilityVisible"));
Ref<Texture2D> visibility_hidden = get_editor_theme_icon(SNAME("GuiVisibilityHidden"));
|
float visibility_hpos = lock_hpos - h_separation - visibility_visible->get_width();
Ref<Texture2D> solo = get_editor_theme_icon(SNAME("AudioBusSolo"));
float solo_hpos = visibility_hpos - h_separation - solo->get_width();
float buttons_width = remove->get_width() + lock->get_width() + visibility_visible->get_width() + solo->get_width() + h_separation * 3;
for (int i = 0; i < tracks.size(); ++i) {
// Related track titles.
int current_track = tracks[i];
String path = String(animation->track_get_path(current_track));
path = path.replace_first(base_path, "");
Color cc = color;
TextLine text_buf = TextLine(path, font, font_size);
|
random
|
<fim_prefix>/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#ifdef ACCESSKIT_ENABLED
#include "accessibility_driver_accesskit.h"<fim_suffix>#include "core/version.h"
#include "servers/text_server.h"
AccessibilityDriverAccessKit *AccessibilityDriverAccessKit::singleton = nullptr;
_FORCE_INLINE_ accesskit_role AccessibilityDriverAccessKit::_accessibility_role(DisplayServer::AccessibilityRole p_role) const {
if (role_map.has(p_role)) {
return role_map[p_role];
}
return ACCESSKIT_ROLE_UNKNOWN;
}
_FORCE_INLINE_ accesskit_action AccessibilityDriverAccessKit::_accessibility_action(DisplayServer::AccessibilityAction p_action) const {
if (action_map.has(p_action)) {
return action_map[p_action];
}
return ACCESSKIT_ACTION_CLICK;
}
<fim_middle>
#include "core/config/project_settings.h"<fim_end>
|
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#ifdef ACCESSKIT_ENABLED
#include "accessibility_driver_accesskit.h"
|
#include "core/config/project_settings.h"
|
#include "core/version.h"
#include "servers/text_server.h"
AccessibilityDriverAccessKit *AccessibilityDriverAccessKit::singleton = nullptr;
_FORCE_INLINE_ accesskit_role AccessibilityDriverAccessKit::_accessibility_role(DisplayServer::AccessibilityRole p_role) const {
if (role_map.has(p_role)) {
return role_map[p_role];
}
return ACCESSKIT_ROLE_UNKNOWN;
}
_FORCE_INLINE_ accesskit_action AccessibilityDriverAccessKit::_accessibility_action(DisplayServer::AccessibilityAction p_action) const {
if (action_map.has(p_action)) {
return action_map[p_action];
}
return ACCESSKIT_ACTION_CLICK;
}
|
random
|
<fim_prefix> animation->track_find_key(selected_track, time, Animation::FIND_MODE_APPROX);
ERR_FAIL_COND(index == -1);
_clear_selection();
_select_at_anim(animation, selected_track, animation->track_get_key_time(selected_track, index), true);
moving_selection_attempt = true;
moving_inserted_key = true;
moving_selection = false;
moving_selection_mouse_begin = mb->get_position();
moving_selection_from_key = index;
moving_selection_from_track = selected_track;
moving_selection_offset = Vector2();
select_single_attempt = IntPair(-1, -1);
queue_redraw();
return;
}
// Box select.
if (mb->get_position().x >= limit && mb->get_position().x < get_size().width) {
<fim_suffix>;
box_selecting = false;
box_selecting_add = false;
box_selection_from = mb->get_position();
return;
}
}
if (box_selecting_attempt && mb.is_valid() && !mb->is_pressed() && mb->get_button_index() == MouseButton::LEFT) {
if (box_selecting) {
// Do actual select.
if (!box_selecting_add) {
_clear_selection();
}
Vector2 bs_from = box_selection_from;
Vector2 bs_to = box_selection_to;
if (bs_from.x > bs_to.x) {
SWAP(bs_from.x, bs_to.x);
}
if (bs_from.y > bs_to.y) {
SWAP(bs_from.y, bs_to.y);
}
Rect2 rect(bs_from, bs_to - bs_from);
bool track_set = false;
int j = 0;
for (int i = 0; i < edit_points.size(); i++) {
if (edit_poin<fim_middle>box_selecting_attempt = true<fim_end>
|
animation->track_find_key(selected_track, time, Animation::FIND_MODE_APPROX);
ERR_FAIL_COND(index == -1);
_clear_selection();
_select_at_anim(animation, selected_track, animation->track_get_key_time(selected_track, index), true);
moving_selection_attempt = true;
moving_inserted_key = true;
moving_selection = false;
moving_selection_mouse_begin = mb->get_position();
moving_selection_from_key = index;
moving_selection_from_track = selected_track;
moving_selection_offset = Vector2();
select_single_attempt = IntPair(-1, -1);
queue_redraw();
return;
}
// Box select.
if (mb->get_position().x >= limit && mb->get_position().x < get_size().width) {
|
box_selecting_attempt = true
|
;
box_selecting = false;
box_selecting_add = false;
box_selection_from = mb->get_position();
return;
}
}
if (box_selecting_attempt && mb.is_valid() && !mb->is_pressed() && mb->get_button_index() == MouseButton::LEFT) {
if (box_selecting) {
// Do actual select.
if (!box_selecting_add) {
_clear_selection();
}
Vector2 bs_from = box_selection_from;
Vector2 bs_to = box_selection_to;
if (bs_from.x > bs_to.x) {
SWAP(bs_from.x, bs_to.x);
}
if (bs_from.y > bs_to.y) {
SWAP(bs_from.y, bs_to.y);
}
Rect2 rect(bs_from, bs_to - bs_from);
bool track_set = false;
int j = 0;
for (int i = 0; i < edit_points.size(); i++) {
if (edit_poin
|
ast_based
|
<fim_prefix>t_type(i) == Animation::TrackType::TYPE_BEZIER) {
visible_tracks.push_back(i);
}
}
if (visible_tracks.size() == 1) {
solo_track = visible_tracks[0];
} else {
solo_track = -1;
}
queue_redraw();
return;
} else if (I.key == SOLO_ICON) {
if (solo_track == track) {
solo_track = -1;
hidden_tracks.clear();
} else {
if (hidden_tracks.has(track)) {
hidden_tracks.erase(track);
}
for (int i = 0; i < animation->get_track_count(); ++i) {
if (animation->track_get_type(i) == Animation::TrackType::TYPE_BEZIER) {
if (i != track && !hidden_tracks.has(i)) {
<fim_suffix>
}
}
}
set_animation_and_track(animation, track, read_only);
solo_track = track;
}
queue_redraw();
return;
}
return;
}
}
}
// Check this first, to allow manipulating key handles while ignoring keyframes before scaling/moving.
bool inside_selection_handles_rect = !read_only && selection_handles_rect.has_point(mb->get_position());
// First, check keyframe.
// Command/Control makes it ignore the keyframe, so control point editors can be force-edited.
if (!inside_selection_handles_rect && !mb->is_command_or_control_pressed()) {
if (_try_select_at_ui_pos(mb->get_position(), mb->is_shift_pressed(), true)) {
<fim_middle>hidden_tracks.insert(i);<fim_end>
|
t_type(i) == Animation::TrackType::TYPE_BEZIER) {
visible_tracks.push_back(i);
}
}
if (visible_tracks.size() == 1) {
solo_track = visible_tracks[0];
} else {
solo_track = -1;
}
queue_redraw();
return;
} else if (I.key == SOLO_ICON) {
if (solo_track == track) {
solo_track = -1;
hidden_tracks.clear();
} else {
if (hidden_tracks.has(track)) {
hidden_tracks.erase(track);
}
for (int i = 0; i < animation->get_track_count(); ++i) {
if (animation->track_get_type(i) == Animation::TrackType::TYPE_BEZIER) {
if (i != track && !hidden_tracks.has(i)) {
|
hidden_tracks.insert(i);
|
}
}
}
set_animation_and_track(animation, track, read_only);
solo_track = track;
}
queue_redraw();
return;
}
return;
}
}
}
// Check this first, to allow manipulating key handles while ignoring keyframes before scaling/moving.
bool inside_selection_handles_rect = !read_only && selection_handles_rect.has_point(mb->get_position());
// First, check keyframe.
// Command/Control makes it ignore the keyframe, so control point editors can be force-edited.
if (!inside_selection_handles_rect && !mb->is_command_or_control_pressed()) {
if (_try_select_at_ui_pos(mb->get_position(), mb->is_shift_pressed(), true)) {
|
ast_based
|
<fim_prefix>onTimelineEdit *timeline = nullptr;
Node *root = nullptr;
Control *play_position = nullptr; //separate control used to draw so updates for only position changed are much faster
real_t play_position_pos = 0;
Ref<Animation> animation;
bool read_only = false;
int selected_track = 0;
Vector<Rect2> view_rects;
Ref<Texture2D> bezier_icon;
Ref<Texture2D> bezier_handle_icon;
Ref<Texture2D> selected_icon;
RBMap<int, Rect2> subtracks;
enum {
REMOVE_ICON,
LOCK_ICON,
SOLO_ICON,
VISIBILITY_ICON
};
RBMap<int, RBMap<int, Rect2>> subtrack_icons;
HashSet<int> locked_tracks;
HashSet<int> hidden_tracks;
int solo_track = -1;
bool is_filtered = false;
float track_v_scroll = 0;
<fim_suffix>
float timeline_v_scroll = 0;
float timeline_v_zoom = 1;
PopupMenu *menu = nullptr;
void _zoom_changed();
void _update_locked_tracks_after(int p_track);
void _update_hidden_tracks_after(int p_track);
virtual void gui_input(const Ref<InputEvent> &p_event) override;
void _menu_selected(int p_index);
void _play_position_draw();
bool _is_track_displayed(int p_track_index);
bool _is_track_curves_displayed(int p_track_index);
Vector2 insert_at_pos;
typedef Pair<int, int> IntPair;
bool moving_selection_attempt = false;
bool moving_inserted_key = false;
Point2 moving_selection_mouse_begin;
IntPair select_single_attempt;
bool moving_selection = false;
int moving_selection_<fim_middle>float track_v_scroll_max = 0;<fim_end>
|
onTimelineEdit *timeline = nullptr;
Node *root = nullptr;
Control *play_position = nullptr; //separate control used to draw so updates for only position changed are much faster
real_t play_position_pos = 0;
Ref<Animation> animation;
bool read_only = false;
int selected_track = 0;
Vector<Rect2> view_rects;
Ref<Texture2D> bezier_icon;
Ref<Texture2D> bezier_handle_icon;
Ref<Texture2D> selected_icon;
RBMap<int, Rect2> subtracks;
enum {
REMOVE_ICON,
LOCK_ICON,
SOLO_ICON,
VISIBILITY_ICON
};
RBMap<int, RBMap<int, Rect2>> subtrack_icons;
HashSet<int> locked_tracks;
HashSet<int> hidden_tracks;
int solo_track = -1;
bool is_filtered = false;
float track_v_scroll = 0;
|
float track_v_scroll_max = 0;
|
float timeline_v_scroll = 0;
float timeline_v_zoom = 1;
PopupMenu *menu = nullptr;
void _zoom_changed();
void _update_locked_tracks_after(int p_track);
void _update_hidden_tracks_after(int p_track);
virtual void gui_input(const Ref<InputEvent> &p_event) override;
void _menu_selected(int p_index);
void _play_position_draw();
bool _is_track_displayed(int p_track_index);
bool _is_track_curves_displayed(int p_track_index);
Vector2 insert_at_pos;
typedef Pair<int, int> IntPair;
bool moving_selection_attempt = false;
bool moving_inserted_key = false;
Point2 moving_selection_mouse_begin;
IntPair select_single_attempt;
bool moving_selection = false;
int moving_selection_
|
ast_based
|
<fim_prefix> float h = p_h;
h = (h - timeline_v_scroll) / timeline_v_zoom;
h = (get_size().height / 2.0) - h;
return h;
}
void AnimationBezierTrackEdit::_draw_track(int p_track, const Color &p_color) {
float scale = timeline->get_zoom_scale();
int limit = timeline->get_name_limit();
int right_limit = get_size().width;
// Selection may have altered the order of keys.
RBMap<real_t, int> key_order;
for (int i = 0; i < animation->track_get_key_count(p_track); i++) {
real_t ofs = animation->track_get_key_time(p_track, i);
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
ofs += moving_selection_offset.x;
} else if (scaling_selection) {<fim_suffix> }
}
key_order[ofs] = i;
}
for (RBMap<real_t, int>::Element *E = key_order.front(); E; E = E->next()) {
int i = E->get();
if (!E->next()) {
break;
}
int i_n = E->next()->get();
float offset = animation->track_get_key_time(p_track, i);
float height = animation->bezier_track_get_key_value(p_track, i);
Vector2 out_handle = animation->bezier_track_get_key_out_handle(p_track, i);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i) {
out_handle = moving_handle_right;
}
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
offset += moving_selection_offset.x;<fim_middle> ofs += -scaling_selection_offset.x + (ofs - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);<fim_end>
|
float h = p_h;
h = (h - timeline_v_scroll) / timeline_v_zoom;
h = (get_size().height / 2.0) - h;
return h;
}
void AnimationBezierTrackEdit::_draw_track(int p_track, const Color &p_color) {
float scale = timeline->get_zoom_scale();
int limit = timeline->get_name_limit();
int right_limit = get_size().width;
// Selection may have altered the order of keys.
RBMap<real_t, int> key_order;
for (int i = 0; i < animation->track_get_key_count(p_track); i++) {
real_t ofs = animation->track_get_key_time(p_track, i);
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
ofs += moving_selection_offset.x;
} else if (scaling_selection) {
|
ofs += -scaling_selection_offset.x + (ofs - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
|
}
}
key_order[ofs] = i;
}
for (RBMap<real_t, int>::Element *E = key_order.front(); E; E = E->next()) {
int i = E->get();
if (!E->next()) {
break;
}
int i_n = E->next()->get();
float offset = animation->track_get_key_time(p_track, i);
float height = animation->bezier_track_get_key_value(p_track, i);
Vector2 out_handle = animation->bezier_track_get_key_out_handle(p_track, i);
if (p_track == moving_handle_track && (moving_handle == -1 || moving_handle == 1) && moving_handle_key == i) {
out_handle = moving_handle_right;
}
if (selection.has(IntPair(p_track, i))) {
if (moving_selection) {
offset += moving_selection_offset.x;
|
random
|
<fim_prefix>ult_params);
exit(0);
} else {
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
print_usage(argc, argv, &default_params);
exit(1);
}
}
if (invalid_param) {
fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
print_usage(argc, argv, &default_params);
exit(1);
}
if (!reqd_param_found){
fprintf(stderr, "error: please specify a llama2.c .bin file to be converted with argument --llama2c-model\n");
print_usage(argc, argv, &default_params);
exit(1);
}
return true;
}
static std::string basename(const std::string &path) {
<fim_suffix>
if (pos == std::string::npos) {
return path;
}
return path.substr(pos + 1);
}
int main(int argc, char ** argv) {
common_init();
struct train_params params = get_default_train_params();
    if (!params_parse(argc, argv, &params)) {
return 1;
}
Config config;
TransformerWeights weights = {};
{
LOG_INF("%s: Loading llama2c model from %s\n", __func__, params.fn_llama2c_model);
FILE * file = fopen(params.fn_llama2c_model, "rb");
if (!file) {
LOG_ERR("%s: Unable to open the checkpoint file %s!\n", __func__, params.fn_llama2c_model);
return 1;
}
// read in the config header
<fim_middle>size_t pos = path.find_last_of("/\\");<fim_end>
|
ult_params);
exit(0);
} else {
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
print_usage(argc, argv, &default_params);
exit(1);
}
}
if (invalid_param) {
fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
print_usage(argc, argv, &default_params);
exit(1);
}
if (!reqd_param_found){
fprintf(stderr, "error: please specify a llama2.c .bin file to be converted with argument --llama2c-model\n");
print_usage(argc, argv, &default_params);
exit(1);
}
return true;
}
static std::string basename(const std::string &path) {
|
size_t pos = path.find_last_of("/\\");
|
if (pos == std::string::npos) {
return path;
}
return path.substr(pos + 1);
}
int main(int argc, char ** argv) {
common_init();
struct train_params params = get_default_train_params();
    if (!params_parse(argc, argv, &params)) {
return 1;
}
Config config;
TransformerWeights weights = {};
{
LOG_INF("%s: Loading llama2c model from %s\n", __func__, params.fn_llama2c_model);
FILE * file = fopen(params.fn_llama2c_model, "rb");
if (!file) {
LOG_ERR("%s: Unable to open the checkpoint file %s!\n", __func__, params.fn_llama2c_model);
return 1;
}
// read in the config header
|
ast_based
|
<fim_prefix>
Mat distCoeffs = c == 2 ? distCoeffs2 : distCoeffs3;
Mat R, T, E, F;
double err = stereoCalibrate(objpt, imgpt, imgpt_right, cameraMatrix1, distCoeffs1,
cameraMatrix, distCoeffs,
imageSize, R, T, E, F,
CALIB_FIX_INTRINSIC,
TermCriteria(TermCriteria::COUNT, 30, 0));
printf("Pair (1,%d) calibration reprojection error = %g\n", c, sqrt(err/(N*2)));
if( c == 2 )
{
cameraMatrix2 = cameraMatrix;
distCoeffs2 = distCoeffs;
R12 = R; T12 = T;
}
else
<fim_suffix>
}
return true;
}
static bool readStringList( const string& filename, vector<string>& l )
{
l.resize(0);
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
l.push_back((string)*it);
return true;
}
int main( int argc, char** argv )
{
int i, k;
int flags = 0;
Size boardSize, imageSize;
float squareSize, aspectRatio;
string outputFilename;
string inputFilename = "";
vector<vector<Point2f> > imgpt[3];
vect<fim_middle>{
R13 = R; T13 = T;
}<fim_end>
|
Mat distCoeffs = c == 2 ? distCoeffs2 : distCoeffs3;
Mat R, T, E, F;
double err = stereoCalibrate(objpt, imgpt, imgpt_right, cameraMatrix1, distCoeffs1,
cameraMatrix, distCoeffs,
imageSize, R, T, E, F,
CALIB_FIX_INTRINSIC,
TermCriteria(TermCriteria::COUNT, 30, 0));
printf("Pair (1,%d) calibration reprojection error = %g\n", c, sqrt(err/(N*2)));
if( c == 2 )
{
cameraMatrix2 = cameraMatrix;
distCoeffs2 = distCoeffs;
R12 = R; T12 = T;
}
else
|
{
R13 = R; T13 = T;
}
|
}
return true;
}
static bool readStringList( const string& filename, vector<string>& l )
{
l.resize(0);
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
l.push_back((string)*it);
return true;
}
int main( int argc, char** argv )
{
int i, k;
int flags = 0;
Size boardSize, imageSize;
float squareSize, aspectRatio;
string outputFilename;
string inputFilename = "";
vector<vector<Point2f> > imgpt[3];
vect
|
ast_based
|
<fim_prefix>rn;
}
animation = p_name;
emit_signal(SceneStringName(animation_changed));
if (frames.is_null()) {
animation = StringName();
stop();
ERR_FAIL_MSG(vformat("There is no animation with name '%s'.", p_name));
}
int frame_count = frames->get_frame_count(animation);
if (animation == StringName() || frame_count == 0) {
stop();
return;
} else if (!frames->get_animation_names().has(animation)) {
animation = StringName();
stop();
ERR_FAIL_MSG(vformat("There is no animation with name '%s'.", p_name));
}
if (std::signbit(get_playing_speed())) {
set_frame_and_progress(frame_count - 1, 1.0);
} else {
set_frame_and_progress(0, 0.0);
}
notify_property_list_changed();
<fim_suffix>;
}
StringName AnimatedSprite2D::get_animation() const {
return animation;
}
PackedStringArray AnimatedSprite2D::get_configuration_warnings() const {
PackedStringArray warnings = Node2D::get_configuration_warnings();
if (frames.is_null()) {
warnings.push_back(RTR("A SpriteFrames resource must be created or set in the \"Sprite Frames\" property in order for AnimatedSprite2D to display frames."));
}
return warnings;
}
#ifdef TOOLS_ENABLED
void AnimatedSprite2D::get_argument_options(const StringName &p_function, int p_idx, List<String> *r_options) const {
const String pf = p_function;
if (p_idx == 0 && frames.is_valid()) {
if (pf == "play" || pf == "play_backwards" || pf == "set_a<fim_middle>queue_redraw()<fim_end>
|
rn;
}
animation = p_name;
emit_signal(SceneStringName(animation_changed));
if (frames.is_null()) {
animation = StringName();
stop();
ERR_FAIL_MSG(vformat("There is no animation with name '%s'.", p_name));
}
int frame_count = frames->get_frame_count(animation);
if (animation == StringName() || frame_count == 0) {
stop();
return;
} else if (!frames->get_animation_names().has(animation)) {
animation = StringName();
stop();
ERR_FAIL_MSG(vformat("There is no animation with name '%s'.", p_name));
}
if (std::signbit(get_playing_speed())) {
set_frame_and_progress(frame_count - 1, 1.0);
} else {
set_frame_and_progress(0, 0.0);
}
notify_property_list_changed();
|
queue_redraw()
|
;
}
StringName AnimatedSprite2D::get_animation() const {
return animation;
}
PackedStringArray AnimatedSprite2D::get_configuration_warnings() const {
PackedStringArray warnings = Node2D::get_configuration_warnings();
if (frames.is_null()) {
warnings.push_back(RTR("A SpriteFrames resource must be created or set in the \"Sprite Frames\" property in order for AnimatedSprite2D to display frames."));
}
return warnings;
}
#ifdef TOOLS_ENABLED
void AnimatedSprite2D::get_argument_options(const StringName &p_function, int p_idx, List<String> *r_options) const {
const String pf = p_function;
if (p_idx == 0 && frames.is_valid()) {
if (pf == "play" || pf == "play_backwards" || pf == "set_a
|
ast_based
|
<fim_prefix> is_hdr = false;
} break;
case Image::FORMAT_ASTC_4x4_HDR: {
block_x = 4;
block_y = 4;
is_hdr = true;
} break;
case Image::FORMAT_ASTC_8x8: {
block_x = 8;
block_y = 8;
is_hdr = false;
} break;
case Image::FORMAT_ASTC_8x8_HDR: {
block_x = 8;
block_y = 8;
is_hdr = true;
} break;
default: {
ERR_FAIL_MSG(vformat("astcenc: Cannot decompress Image with a non-ASTC format: %s.", Image::get_format_name(src_format)));
} break;
}
// Initialize astcenc.
const astcenc_profile profile = is_hdr ? ASTCENC_PRF_HDR : ASTCENC_PRF_LDR;
astcenc_config config;
const float quality = ASTCENC_PRE_MEDIUM;
const uint32_t flags = ASTCENC_FLG_DECOMPRESS_ONLY;
<fim_suffix> ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Configuration initialization failed: %s.", astcenc_get_error_string(status)));
// Context allocation.
astcenc_context *context = nullptr;
const unsigned int thread_count = 1;
status = astcenc_context_alloc(&config, thread_count, &context);
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Context allocation failed: %s.", astcenc_get_error_string(status)));
const Image::Format target_format = is_hdr ? Image::FORMAT_RGBAH : Image::FORMAT_RGBA8;
const bool has_mipmaps = r_img->has_mipmaps();
int width = r_img->get_width();
int height = r_img->get_height();
<fim_middle> astcenc_error status = astcenc_config_init(profile, block_x, block_y, 1, quality, flags, &config);<fim_end>
|
is_hdr = false;
} break;
case Image::FORMAT_ASTC_4x4_HDR: {
block_x = 4;
block_y = 4;
is_hdr = true;
} break;
case Image::FORMAT_ASTC_8x8: {
block_x = 8;
block_y = 8;
is_hdr = false;
} break;
case Image::FORMAT_ASTC_8x8_HDR: {
block_x = 8;
block_y = 8;
is_hdr = true;
} break;
default: {
ERR_FAIL_MSG(vformat("astcenc: Cannot decompress Image with a non-ASTC format: %s.", Image::get_format_name(src_format)));
} break;
}
// Initialize astcenc.
const astcenc_profile profile = is_hdr ? ASTCENC_PRF_HDR : ASTCENC_PRF_LDR;
astcenc_config config;
const float quality = ASTCENC_PRE_MEDIUM;
const uint32_t flags = ASTCENC_FLG_DECOMPRESS_ONLY;
|
astcenc_error status = astcenc_config_init(profile, block_x, block_y, 1, quality, flags, &config);
|
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Configuration initialization failed: %s.", astcenc_get_error_string(status)));
// Context allocation.
astcenc_context *context = nullptr;
const unsigned int thread_count = 1;
status = astcenc_context_alloc(&config, thread_count, &context);
ERR_FAIL_COND_MSG(status != ASTCENC_SUCCESS,
vformat("astcenc: Context allocation failed: %s.", astcenc_get_error_string(status)));
const Image::Format target_format = is_hdr ? Image::FORMAT_RGBAH : Image::FORMAT_RGBA8;
const bool has_mipmaps = r_img->has_mipmaps();
int width = r_img->get_width();
int height = r_img->get_height();
|
random
|
<fim_prefix>->Next(RIL_BLOCK);
continue;
case PT_NOISE:
tprintf("TODO: Please report image which triggers the noise case.\n");
ASSERT_HOST(false);
default:
break;
}
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
alto_str << "\t\t\t\t<ComposedBlock ID=\"" << GetID("cblock", page_number, bcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_BLOCK, alto_str);
alto_str << "\n";
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
alto_str << "\t\t\t\t\t<TextBlock ID=\"" << GetID("block", page_number, tcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_PARA, alto_str);
alto_str << "\n";
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) <fim_suffix>
AddBoxToAlto(res_it.get(), RIL_TEXTLINE, alto_str);
alto_str << "\n";
}
alto_str << "\t\t\t\t\t\t\t<String ID=\"" << GetID("string", page_number, wcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_WORD, alto_str);
alto_str << " CONTENT=\"";
bool last_word_in_line = res_it->IsAtFinalElement(RIL_TEXTLINE, RIL_WORD);
bool last_word_in_tblock = res_it->IsAtFinalElement(RIL_PARA, RIL_WORD);
bool last_word_in_cblock = res_it->IsAtFinalElement(RIL_BLOCK, RIL_WORD);
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
do {
const std::unique_ptr<const char[]> grapheme(res_it->GetUTF8Text(RIL_SYMBOL));
if (grapheme && grapheme[0] != 0<fim_middle>{
alto_str << "\t\t\t\t\t\t<TextLine ID=\"" << GetID("line", page_number, lcnt) << "\"";<fim_end>
|
->Next(RIL_BLOCK);
continue;
case PT_NOISE:
tprintf("TODO: Please report image which triggers the noise case.\n");
ASSERT_HOST(false);
default:
break;
}
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
alto_str << "\t\t\t\t<ComposedBlock ID=\"" << GetID("cblock", page_number, bcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_BLOCK, alto_str);
alto_str << "\n";
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
alto_str << "\t\t\t\t\t<TextBlock ID=\"" << GetID("block", page_number, tcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_PARA, alto_str);
alto_str << "\n";
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE))
|
{
alto_str << "\t\t\t\t\t\t<TextLine ID=\"" << GetID("line", page_number, lcnt) << "\"";
|
AddBoxToAlto(res_it.get(), RIL_TEXTLINE, alto_str);
alto_str << "\n";
}
alto_str << "\t\t\t\t\t\t\t<String ID=\"" << GetID("string", page_number, wcnt) << "\"";
AddBoxToAlto(res_it.get(), RIL_WORD, alto_str);
alto_str << " CONTENT=\"";
bool last_word_in_line = res_it->IsAtFinalElement(RIL_TEXTLINE, RIL_WORD);
bool last_word_in_tblock = res_it->IsAtFinalElement(RIL_PARA, RIL_WORD);
bool last_word_in_cblock = res_it->IsAtFinalElement(RIL_BLOCK, RIL_WORD);
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
do {
const std::unique_ptr<const char[]> grapheme(res_it->GetUTF8Text(RIL_SYMBOL));
if (grapheme && grapheme[0] != 0
|
ast_based
|
<fim_prefix> ClearResults();
}
if (!block_list_->empty()) {
return 0;
}
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
#ifndef DISABLED_LEGACY_ENGINE
tesseract_->InitAdaptiveClassifier(nullptr);
#endif
}
if (tesseract_->pix_binary() == nullptr && !Threshold(&tesseract_->mutable_pix_binary()->pix_)) {
return -1;
}
tesseract_->PrepareForPageseg();
#ifndef DISABLED_LEGACY_ENGINE
if (tesseract_->textord_equation_detect) {
if (equ_detect_ == nullptr && !datapath_.empty()) {
equ_detect_ = new EquationDetect(datapath_.c_str(), nullptr);
}
if (equ_detect_ == nullptr) {
tprintf("Warning: Could not set equation detector\n");
} else {<fim_suffix>#endif // ndef DISABLED_LEGACY_ENGINE
Tesseract *osd_tess = osd_tesseract_;
OSResults osr;
#ifndef DISABLED_LEGACY_ENGINE
if (PSM_OSD_ENABLED(tesseract_->tessedit_pageseg_mode) && osd_tess == nullptr) {
if (strcmp(language_.c_str(), "osd") == 0) {
osd_tess = tesseract_;
} else {
osd_tesseract_ = new Tesseract;
TessdataManager mgr(reader_);
if (datapath_.empty()) {
tprintf(
"Warning: Auto orientation and script detection requested,"
" but data path is undefined\n");
delete osd_tesseract_;
osd_tesseract_ = nullptr;
} else if (osd_tesseract_->init_tesseract(datapath_, "", "osd", OEM_TESSERACT_ONLY,<fim_middle> tesseract_->SetEquationDetect(equ_detect_);
}
}<fim_end>
|
ClearResults();
}
if (!block_list_->empty()) {
return 0;
}
if (tesseract_ == nullptr) {
tesseract_ = new Tesseract;
#ifndef DISABLED_LEGACY_ENGINE
tesseract_->InitAdaptiveClassifier(nullptr);
#endif
}
if (tesseract_->pix_binary() == nullptr && !Threshold(&tesseract_->mutable_pix_binary()->pix_)) {
return -1;
}
tesseract_->PrepareForPageseg();
#ifndef DISABLED_LEGACY_ENGINE
if (tesseract_->textord_equation_detect) {
if (equ_detect_ == nullptr && !datapath_.empty()) {
equ_detect_ = new EquationDetect(datapath_.c_str(), nullptr);
}
if (equ_detect_ == nullptr) {
tprintf("Warning: Could not set equation detector\n");
} else {
|
tesseract_->SetEquationDetect(equ_detect_);
}
}
|
#endif // ndef DISABLED_LEGACY_ENGINE
Tesseract *osd_tess = osd_tesseract_;
OSResults osr;
#ifndef DISABLED_LEGACY_ENGINE
if (PSM_OSD_ENABLED(tesseract_->tessedit_pageseg_mode) && osd_tess == nullptr) {
if (strcmp(language_.c_str(), "osd") == 0) {
osd_tess = tesseract_;
} else {
osd_tesseract_ = new Tesseract;
TessdataManager mgr(reader_);
if (datapath_.empty()) {
tprintf(
"Warning: Auto orientation and script detection requested,"
" but data path is undefined\n");
delete osd_tesseract_;
osd_tesseract_ = nullptr;
} else if (osd_tesseract_->init_tesseract(datapath_, "", "osd", OEM_TESSERACT_ONLY,
|
random
|
<fim_prefix>
namespace calib
{
#define OVERLAY_DELAY 1000
#define IMAGE_MAX_WIDTH 1280
#define IMAGE_MAX_HEIGHT 960
bool showOverlayMessage(const std::string& message);
enum InputType { Video, Pictures };
enum InputVideoSource { Camera, File };
enum TemplateType { AcirclesGrid, Chessboard, ChArUco, DoubleAcirclesGrid, CirclesGrid };
static const std::string mainWindowName = "Calibration";
static const std::string gridWindowName = "Board locations";
static const std::string consoleHelp = "Hot keys:\nesc - exit application\n"
"s - save current data to .xml file\n"
"r - delete last frame\n"<fim_suffix> static const double sigmaMult = 1.96;
struct calibrationData
{
cv::Mat cameraMatrix;
cv::Mat distCoeffs;
cv::Mat stdDeviations;
cv::Mat perViewErrors;
std::vector<cv::Mat> rvecs;
std::vector<cv::Mat> tvecs;
double totalAvgErr;
cv::Size imageSize;
std::vector<cv::Mat> allFrames;
std::vector<std::vector<cv::Point2f> > imagePoints;
std::vector< std::vector<cv::Point3f> > objectPoints;
std::vector<cv::Mat> allCharucoCorners;
std::vector<cv::Mat> allCharucoIds;
cv::Mat undistMap1, undistMap2;
calibrationData()
{<fim_middle> "u - enable/disable applying undistortion\n"
"d - delete all frames\n"
"v - switch visualization";
<fim_end>
|
namespace calib
{
#define OVERLAY_DELAY 1000
#define IMAGE_MAX_WIDTH 1280
#define IMAGE_MAX_HEIGHT 960
bool showOverlayMessage(const std::string& message);
enum InputType { Video, Pictures };
enum InputVideoSource { Camera, File };
enum TemplateType { AcirclesGrid, Chessboard, ChArUco, DoubleAcirclesGrid, CirclesGrid };
static const std::string mainWindowName = "Calibration";
static const std::string gridWindowName = "Board locations";
static const std::string consoleHelp = "Hot keys:\nesc - exit application\n"
"s - save current data to .xml file\n"
"r - delete last frame\n"
|
"u - enable/disable applying undistortion\n"
"d - delete all frames\n"
"v - switch visualization";
|
static const double sigmaMult = 1.96;
struct calibrationData
{
cv::Mat cameraMatrix;
cv::Mat distCoeffs;
cv::Mat stdDeviations;
cv::Mat perViewErrors;
std::vector<cv::Mat> rvecs;
std::vector<cv::Mat> tvecs;
double totalAvgErr;
cv::Size imageSize;
std::vector<cv::Mat> allFrames;
std::vector<std::vector<cv::Point2f> > imagePoints;
std::vector< std::vector<cv::Point3f> > objectPoints;
std::vector<cv::Mat> allCharucoCorners;
std::vector<cv::Mat> allCharucoIds;
cv::Mat undistMap1, undistMap2;
calibrationData()
{
|
random
|
<fim_prefix>bility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (p_vertical) {
accesskit_node_set_orientation(ae->node, ACCESSKIT_ORIENTATION_VERTICAL);
} else {
accesskit_node_set_orientation(ae->node, ACCESSKIT_ORIENTATION_HORIZONTAL);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_shortcut(const RID &p_id, const String &p_shortcut) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = <fim_suffix>;
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_shortcut.is_empty()) {
accesskit_node_set_keyboard_shortcut(ae->node, p_shortcut.utf8().ptr());
} else {
accesskit_node_clear_keyboard_shortcut(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_url(const RID &p_id, const String &p_url) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_url.is_empty()) {
accesskit_node_set_url(ae->node, p_url.utf8().ptr());
} else {
accesskit_node_clear_url(ae-><fim_middle>rid_owner.get_or_null(p_id)<fim_end>
|
bility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (p_vertical) {
accesskit_node_set_orientation(ae->node, ACCESSKIT_ORIENTATION_VERTICAL);
} else {
accesskit_node_set_orientation(ae->node, ACCESSKIT_ORIENTATION_HORIZONTAL);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_shortcut(const RID &p_id, const String &p_shortcut) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae =
|
rid_owner.get_or_null(p_id)
|
;
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_shortcut.is_empty()) {
accesskit_node_set_keyboard_shortcut(ae->node, p_shortcut.utf8().ptr());
} else {
accesskit_node_clear_keyboard_shortcut(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_url(const RID &p_id, const String &p_url) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_url.is_empty()) {
accesskit_node_set_url(ae->node, p_url.utf8().ptr());
} else {
accesskit_node_clear_url(ae->
|
ast_based
|
<fim_prefix> ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
}
}
} else {
// only copy
memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
}
v_diff.push_back(diff);
print_debug_tensor(diff);
// free memory of diff_tmp
diff_tmp.resize(0);
}
}
~train_context() {
for (auto ptr : v_final) free(ptr->data);
for (auto ptr : v_diff) free(ptr->data);
// no need to free v_diff_tmp, since we didn't use malloc
ggml_free(ctx_ggml);
}
};
struct tokenized_prompt {<fim_suffix> size_t max_seq_len;
tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
const llama_model * model = llama_get_model(ctx);
const llama_vocab * vocab = llama_model_get_vocab(model);
const bool add_bos = llama_vocab_get_add_bos(vocab);
tokens_pos = common_tokenize(ctx, pos, add_bos, true);
tokens_neg = common_tokenize(ctx, neg, add_bos, true);
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
padding_seq(ctx, tokens_pos, max_seq_len);
padding_seq(ctx, tokens_neg, max_seq_len);
}
void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {<fim_middle> std::vector<llama_token> tokens_pos;
std::vector<llama_token> tokens_neg;<fim_end>
|
ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
}
}
} else {
// only copy
memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
}
v_diff.push_back(diff);
print_debug_tensor(diff);
// free memory of diff_tmp
diff_tmp.resize(0);
}
}
~train_context() {
for (auto ptr : v_final) free(ptr->data);
for (auto ptr : v_diff) free(ptr->data);
// no need to free v_diff_tmp, since we didn't use malloc
ggml_free(ctx_ggml);
}
};
struct tokenized_prompt {
|
std::vector<llama_token> tokens_pos;
std::vector<llama_token> tokens_neg;
|
size_t max_seq_len;
tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
const llama_model * model = llama_get_model(ctx);
const llama_vocab * vocab = llama_model_get_vocab(model);
const bool add_bos = llama_vocab_get_add_bos(vocab);
tokens_pos = common_tokenize(ctx, pos, add_bos, true);
tokens_neg = common_tokenize(ctx, neg, add_bos, true);
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
padding_seq(ctx, tokens_pos, max_seq_len);
padding_seq(ctx, tokens_neg, max_seq_len);
}
void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
|
random
|
<fim_prefix>#ifndef DISABLED_LEGACY_ENGINE
if (tesseract_->tessedit_resegment_from_line_boxes) {
page_res_ = tesseract_->ApplyBoxes(input_file_.c_str(), true, block_list_);
} else if (tesseract_->tessedit_resegment_from_boxes) {
page_res_ = tesseract_->ApplyBoxes(input_file_.c_str(), false, block_list_);
} else
#endif // ndef DISABLED_LEGACY_ENGINE
{
page_res_ =
new PAGE_RES(tesseract_->AnyLSTMLang(), block_list_, &tesseract_->prev_word_best_choice_);
}
if (page_res_ == nullptr) {
return -1;
}
if (tesseract_->tessedit_train_line_recognizer) {
if (!tesseract_->TrainLineRecognizer(input_file_.c_str(), output_file_, block_list_)) {
return -1;
}<fim_suffix> if (tesseract_->tessedit_make_boxes_from_boxes) {
tesseract_->CorrectClassifyWords(page_res_);
return 0;
}
#endif // ndef DISABLED_LEGACY_ENGINE
int result = 0;
if (tesseract_->interactive_display_mode) {
#ifndef GRAPHICS_DISABLED
tesseract_->pgeditor_main(rect_width_, rect_height_, page_res_);
#endif // !GRAPHICS_DISABLED
// The page_res is invalid after an interactive session, so cleanup
// in a way that lets us continue to the next page without crashing.
delete page_res_;
page_res_ = nullptr;
return -1;
#ifndef DISABLED_LEGACY_ENGINE
} else if (tesseract_->tessedit_train_from_boxes) {
std::string fontname;<fim_middle> tesseract_->CorrectClassifyWords(page_res_);
return 0;
}
#ifndef DISABLED_LEGACY_ENGINE<fim_end>
|
#ifndef DISABLED_LEGACY_ENGINE
if (tesseract_->tessedit_resegment_from_line_boxes) {
page_res_ = tesseract_->ApplyBoxes(input_file_.c_str(), true, block_list_);
} else if (tesseract_->tessedit_resegment_from_boxes) {
page_res_ = tesseract_->ApplyBoxes(input_file_.c_str(), false, block_list_);
} else
#endif // ndef DISABLED_LEGACY_ENGINE
{
page_res_ =
new PAGE_RES(tesseract_->AnyLSTMLang(), block_list_, &tesseract_->prev_word_best_choice_);
}
if (page_res_ == nullptr) {
return -1;
}
if (tesseract_->tessedit_train_line_recognizer) {
if (!tesseract_->TrainLineRecognizer(input_file_.c_str(), output_file_, block_list_)) {
return -1;
}
|
tesseract_->CorrectClassifyWords(page_res_);
return 0;
}
#ifndef DISABLED_LEGACY_ENGINE
|
if (tesseract_->tessedit_make_boxes_from_boxes) {
tesseract_->CorrectClassifyWords(page_res_);
return 0;
}
#endif // ndef DISABLED_LEGACY_ENGINE
int result = 0;
if (tesseract_->interactive_display_mode) {
#ifndef GRAPHICS_DISABLED
tesseract_->pgeditor_main(rect_width_, rect_height_, page_res_);
#endif // !GRAPHICS_DISABLED
// The page_res is invalid after an interactive session, so cleanup
// in a way that lets us continue to the next page without crashing.
delete page_res_;
page_res_ = nullptr;
return -1;
#ifndef DISABLED_LEGACY_ENGINE
} else if (tesseract_->tessedit_train_from_boxes) {
std::string fontname;
|
random
|
<fim_prefix> }
// 6 - (undo) Reinsert overlapped keys.
List<AnimMoveRestore>::ConstIterator restore_itr = to_restore.begin();
List<Animation::HandleMode>::ConstIterator handle_itr = to_restore_handle_modes.begin();
for (; restore_itr != to_restore.end() && handle_itr != to_restore_handle_modes.end(); ++restore_itr, ++handle_itr) {
const AnimMoveRestore &amr = *restore_itr;
Array key = amr.key;
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", amr.track, amr.time, amr.key, 1);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
amr.track,<fim_suffix> *handle_itr);
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// 7 - Reselect.
int i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
real_t newpos = oldpos + moving_selection_offset.x;
undo_redo->add_do_method(this, "_select_at_anim", animation, E->get().first, newpos, i == 0);
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, oldpos, i == 0);
i++;
}
<fim_middle> amr.time,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),<fim_end>
|
}
// 6 - (undo) Reinsert overlapped keys.
List<AnimMoveRestore>::ConstIterator restore_itr = to_restore.begin();
List<Animation::HandleMode>::ConstIterator handle_itr = to_restore_handle_modes.begin();
for (; restore_itr != to_restore.end() && handle_itr != to_restore_handle_modes.end(); ++restore_itr, ++handle_itr) {
const AnimMoveRestore &amr = *restore_itr;
Array key = amr.key;
undo_redo->add_undo_method(animation.ptr(), "track_insert_key", amr.track, amr.time, amr.key, 1);
undo_redo->add_undo_method(
this,
"_bezier_track_insert_key_at_anim",
animation,
amr.track,
|
amr.time,
key[0],
Vector2(key[1], key[2]),
Vector2(key[3], key[4]),
|
*handle_itr);
}
undo_redo->add_do_method(this, "_clear_selection_for_anim", animation);
undo_redo->add_undo_method(this, "_clear_selection_for_anim", animation);
// 7 - Reselect.
int i = 0;
for (SelectionSet::Element *E = selection.back(); E; E = E->prev()) {
real_t oldpos = animation->track_get_key_time(E->get().first, E->get().second);
real_t newpos = oldpos + moving_selection_offset.x;
undo_redo->add_do_method(this, "_select_at_anim", animation, E->get().first, newpos, i == 0);
undo_redo->add_undo_method(this, "_select_at_anim", animation, E->get().first, oldpos, i == 0);
i++;
}
|
random
|
<fim_prefix>#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
namespace {
// Implementation of varhandle that binds a VarHandleOp to an XlaResource of the
// same name. It is not safe to use this op in a JIT context.
class XlaAotOnlyVarHandleOp : public XlaOpKernel {
public:
explicit XlaAotOnlyVarHandleOp(OpKernelConstruction* c);
void Compile(XlaOpKernelContext* context) override;
private:
string name_;
};
XlaAotOnlyVarHandleOp::XlaAotOnlyVarHandleOp(OpKernelConstruction* c)
: XlaOpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("shared_name", &name_));
}<fim_suffix> // Look for a resource of the same name. TF also keys that on the container
// and type attributes, but that doesn't seem necessary.
for (const auto& resource : context->xla_context()->resources()) {
if (resource->kind() == XlaResource::kVariable &&
resource->name() == name_) {
context->SetResourceOutput(0, resource.get());
return;
}
}
context->SetStatus(
errors::InvalidArgument("Variable: ", name_, " not configured"));
}
} // namespace
REGISTER_OP(tfcompile::kXlaAotOnlyVarHandleOp)
.Doc(R"doc(
Internal VarHandleOp registration used for XLA AOT compilation.
)doc")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")<fim_middle>
void XlaAotOnlyVarHandleOp::Compile(XlaOpKernelContext* context) {<fim_end>
|
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
namespace {
// Implementation of varhandle that binds a VarHandleOp to an XlaResource of the
// same name. It is not safe to use this op in a JIT context.
class XlaAotOnlyVarHandleOp : public XlaOpKernel {
public:
explicit XlaAotOnlyVarHandleOp(OpKernelConstruction* c);
void Compile(XlaOpKernelContext* context) override;
private:
string name_;
};
XlaAotOnlyVarHandleOp::XlaAotOnlyVarHandleOp(OpKernelConstruction* c)
: XlaOpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("shared_name", &name_));
}
|
void XlaAotOnlyVarHandleOp::Compile(XlaOpKernelContext* context) {
|
// Look for a resource of the same name. TF also keys that on the container
// and type attributes, but that doesn't seem necessary.
for (const auto& resource : context->xla_context()->resources()) {
if (resource->kind() == XlaResource::kVariable &&
resource->name() == name_) {
context->SetResourceOutput(0, resource.get());
return;
}
}
context->SetStatus(
errors::InvalidArgument("Variable: ", name_, " not configured"));
}
} // namespace
REGISTER_OP(tfcompile::kXlaAotOnlyVarHandleOp)
.Doc(R"doc(
Internal VarHandleOp registration used for XLA AOT compilation.
)doc")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
|
random
|
<fim_prefix>ring(animation->track_get_path(selected_track));
if (is_filtered) {
if (root && root->has_node(base_path)) {
Node *node = root->get_node(base_path);
if (!node || !EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (animation->track_get_type(i) != Animation::TrackType::TYPE_BEZIER) {
continue;
}
base_path = String(animation->track_get_path(i));
if (root && root->has_node(base_path)) {
node = root->get_node(base_path);
if (!node) {
continue; // No node, no filter.
}
if (!EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) <fim_suffix>
set_animation_and_track(animation, i, read_only);
break;
}
}
}
}
}
queue_redraw();
}
void AnimationBezierTrackEdit::auto_fit_vertically() {
int track_count = animation->get_track_count();
real_t minimum_value = Math::INF;
real_t maximum_value = -Math::INF;
int nb_track_visible = 0;
for (int i = 0; i < track_count; ++i) {
if (!_is_track_curves_displayed(i) || locked_tracks.has(i)) {
continue;
}
int key_count = animation->track_get_key_count(i);
for (int j = 0; j < key_count; ++j) {
real_t value = animation->bezier_track_get_key_value(i, j);
minimum_value = MIN(value, minimum_value);
maximum_value = MAX(value, maximum_value);
// W<fim_middle>{
continue; // Skip track due to not selected.
}<fim_end>
|
ring(animation->track_get_path(selected_track));
if (is_filtered) {
if (root && root->has_node(base_path)) {
Node *node = root->get_node(base_path);
if (!node || !EditorNode::get_singleton()->get_editor_selection()->is_selected(node)) {
for (int i = 0; i < animation->get_track_count(); ++i) {
if (animation->track_get_type(i) != Animation::TrackType::TYPE_BEZIER) {
continue;
}
base_path = String(animation->track_get_path(i));
if (root && root->has_node(base_path)) {
node = root->get_node(base_path);
if (!node) {
continue; // No node, no filter.
}
if (!EditorNode::get_singleton()->get_editor_selection()->is_selected(node))
|
{
continue; // Skip track due to not selected.
}
|
set_animation_and_track(animation, i, read_only);
break;
}
}
}
}
}
queue_redraw();
}
void AnimationBezierTrackEdit::auto_fit_vertically() {
int track_count = animation->get_track_count();
real_t minimum_value = Math::INF;
real_t maximum_value = -Math::INF;
int nb_track_visible = 0;
for (int i = 0; i < track_count; ++i) {
if (!_is_track_curves_displayed(i) || locked_tracks.has(i)) {
continue;
}
int key_count = animation->track_get_key_count(i);
for (int j = 0; j < key_count; ++j) {
real_t value = animation->bezier_track_get_key_value(i, j);
minimum_value = MIN(value, minimum_value);
maximum_value = MAX(value, maximum_value);
// W
|
ast_based
|
<fim_prefix>);
recognition_done_ = true;
#ifndef DISABLED_LEGACY_ENGINE
if (tesseract_->tessedit_resegment_from_line_boxes) {
page_res_ = tesseract_->ApplyBoxes(input_file_.c_str(), true, block_list_);
} else if (tesseract_->tessedit_resegment_from_boxes) {
page_res_ = tesseract_->ApplyBoxes(input_file_.c_str(), false, block_list_);
} else
#endif // ndef DISABLED_LEGACY_ENGINE
{
page_res_ =
new PAGE_RES(tesseract_->AnyLSTMLang(), block_list_, &tesseract_->prev_word_best_choice_);
}
if (page_res_ == nullptr) {
return -1;
}
if (tesseract_->tessedit_train_line_recognizer) {
if (!tesseract_->TrainLineRecognizer(input_file_.c_str(), output_file_, block_list_)) <fim_suffix>
tesseract_->CorrectClassifyWords(page_res_);
return 0;
}
#ifndef DISABLED_LEGACY_ENGINE
if (tesseract_->tessedit_make_boxes_from_boxes) {
tesseract_->CorrectClassifyWords(page_res_);
return 0;
}
#endif // ndef DISABLED_LEGACY_ENGINE
int result = 0;
if (tesseract_->interactive_display_mode) {
#ifndef GRAPHICS_DISABLED
tesseract_->pgeditor_main(rect_width_, rect_height_, page_res_);
#endif // !GRAPHICS_DISABLED
// The page_res is invalid after an interactive session, so cleanup
// in a way that lets us continue to the next page without crashing.
delete page_res_;
page_res_ = nullptr;
return -1;
#ifndef DISABLED_LEGACY_ENGINE
} else if<fim_middle>{
return -1;
}<fim_end>
|
);
recognition_done_ = true;
#ifndef DISABLED_LEGACY_ENGINE
if (tesseract_->tessedit_resegment_from_line_boxes) {
page_res_ = tesseract_->ApplyBoxes(input_file_.c_str(), true, block_list_);
} else if (tesseract_->tessedit_resegment_from_boxes) {
page_res_ = tesseract_->ApplyBoxes(input_file_.c_str(), false, block_list_);
} else
#endif // ndef DISABLED_LEGACY_ENGINE
{
page_res_ =
new PAGE_RES(tesseract_->AnyLSTMLang(), block_list_, &tesseract_->prev_word_best_choice_);
}
if (page_res_ == nullptr) {
return -1;
}
if (tesseract_->tessedit_train_line_recognizer) {
if (!tesseract_->TrainLineRecognizer(input_file_.c_str(), output_file_, block_list_))
|
{
return -1;
}
|
tesseract_->CorrectClassifyWords(page_res_);
return 0;
}
#ifndef DISABLED_LEGACY_ENGINE
if (tesseract_->tessedit_make_boxes_from_boxes) {
tesseract_->CorrectClassifyWords(page_res_);
return 0;
}
#endif // ndef DISABLED_LEGACY_ENGINE
int result = 0;
if (tesseract_->interactive_display_mode) {
#ifndef GRAPHICS_DISABLED
tesseract_->pgeditor_main(rect_width_, rect_height_, page_res_);
#endif // !GRAPHICS_DISABLED
// The page_res is invalid after an interactive session, so cleanup
// in a way that lets us continue to the next page without crashing.
delete page_res_;
page_res_ = nullptr;
return -1;
#ifndef DISABLED_LEGACY_ENGINE
} else if
|
ast_based
|
<fim_prefix> ev->set_relative(p_event_pos - hover_prev_pos);
ev->set_relative_screen_position(ev->get_relative());
Input::get_singleton()->parse_input_event(ev);
hover_prev_pos = p_event_pos;
} break;
case AMOTION_EVENT_ACTION_DOWN:
case AMOTION_EVENT_ACTION_BUTTON_PRESS: {
// Release any remaining touches or mouse event
_release_mouse_event_info();
_release_all_touch();
mouse_event_info.valid = true;
mouse_event_info.pos = p_event_pos;
_parse_mouse_event_info(event_buttons_mask, true, false, p_double_click, p_source_mouse_relative);
} break;
case AMOTION_EVENT_ACTION_CANCEL: {
_cancel_mouse_event_info(p_source_mouse_relative);
} break;
<fim_suffix> case AMOTION_EVENT_ACTION_MOVE: {
if (!p_source_mouse_relative && !mouse_event_info.valid) {
return;
}
Ref<InputEventMouseMotion> ev;
ev.instantiate();
_set_key_modifier_state(ev, Key::NONE);
if (p_source_mouse_relative) {
ev->set_position(hover_prev_pos);
ev->set_global_position(hover_prev_pos);
ev->set_relative(p_event_pos);
ev->set_relative_screen_position(p_event_pos);
} else {
ev->set_position(p_event_pos);
ev->set_global_position(p_event_pos);
ev->set_relative(p_event_pos - hover_prev_pos);
ev->set_relative_screen_position(ev->get_relative());
mouse_event_info.pos = p_event_pos;
hover_prev_pos = p_event_pos;
}<fim_middle> case AMOTION_EVENT_ACTION_UP:
case AMOTION_EVENT_ACTION_BUTTON_RELEASE: {
_release_mouse_event_info(p_source_mouse_relative);
} break;
<fim_end>
|
ev->set_relative(p_event_pos - hover_prev_pos);
ev->set_relative_screen_position(ev->get_relative());
Input::get_singleton()->parse_input_event(ev);
hover_prev_pos = p_event_pos;
} break;
case AMOTION_EVENT_ACTION_DOWN:
case AMOTION_EVENT_ACTION_BUTTON_PRESS: {
// Release any remaining touches or mouse event
_release_mouse_event_info();
_release_all_touch();
mouse_event_info.valid = true;
mouse_event_info.pos = p_event_pos;
_parse_mouse_event_info(event_buttons_mask, true, false, p_double_click, p_source_mouse_relative);
} break;
case AMOTION_EVENT_ACTION_CANCEL: {
_cancel_mouse_event_info(p_source_mouse_relative);
} break;
|
case AMOTION_EVENT_ACTION_UP:
case AMOTION_EVENT_ACTION_BUTTON_RELEASE: {
_release_mouse_event_info(p_source_mouse_relative);
} break;
|
case AMOTION_EVENT_ACTION_MOVE: {
if (!p_source_mouse_relative && !mouse_event_info.valid) {
return;
}
Ref<InputEventMouseMotion> ev;
ev.instantiate();
_set_key_modifier_state(ev, Key::NONE);
if (p_source_mouse_relative) {
ev->set_position(hover_prev_pos);
ev->set_global_position(hover_prev_pos);
ev->set_relative(p_event_pos);
ev->set_relative_screen_position(p_event_pos);
} else {
ev->set_position(p_event_pos);
ev->set_global_position(p_event_pos);
ev->set_relative(p_event_pos - hover_prev_pos);
ev->set_relative_screen_position(ev->get_relative());
mouse_event_info.pos = p_event_pos;
hover_prev_pos = p_event_pos;
}
|
random
|
<fim_prefix>nimatedSprite2D::get_speed_scale() const {
return speed_scale;
}
float AnimatedSprite2D::get_playing_speed() const {
if (!playing) {
return 0;
}
return speed_scale * custom_speed_scale;
}
void AnimatedSprite2D::set_centered(bool p_center) {
if (centered == p_center) {
return;
}
centered = p_center;
queue_redraw();
item_rect_changed();
}
bool AnimatedSprite2D::is_centered() const {
return centered;
}
void AnimatedSprite2D::set_offset(const Point2 &p_offset) {
if (offset == p_offset) {
return;
}
offset = p_offset;
queue_redraw();
item_rect_changed();
}
Point2 AnimatedSprite2D::get_offset() const {
return offset;
}
void AnimatedSprite2D::set_flip_h(bool p_flip) {
<fim_suffix>
hflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_h() const {
return hflip;
}
void AnimatedSprite2D::set_flip_v(bool p_flip) {
if (vflip == p_flip) {
return;
}
vflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_v() const {
return vflip;
}
void AnimatedSprite2D::_res_changed() {
set_frame_and_progress(frame, frame_progress);
queue_redraw();
notify_property_list_changed();
}
bool AnimatedSprite2D::is_playing() const {
return playing;
}
void AnimatedSprite2D::set_autoplay(const String &p_name) {
if (is_inside_tree() && !Engine::get_singleton()->is_editor_hint()) {
WARN_PRINT("Setting autoplay after the node has been added to the scene<fim_middle>if (hflip == p_flip) {
return;
}<fim_end>
|
nimatedSprite2D::get_speed_scale() const {
return speed_scale;
}
float AnimatedSprite2D::get_playing_speed() const {
if (!playing) {
return 0;
}
return speed_scale * custom_speed_scale;
}
void AnimatedSprite2D::set_centered(bool p_center) {
if (centered == p_center) {
return;
}
centered = p_center;
queue_redraw();
item_rect_changed();
}
bool AnimatedSprite2D::is_centered() const {
return centered;
}
void AnimatedSprite2D::set_offset(const Point2 &p_offset) {
if (offset == p_offset) {
return;
}
offset = p_offset;
queue_redraw();
item_rect_changed();
}
Point2 AnimatedSprite2D::get_offset() const {
return offset;
}
void AnimatedSprite2D::set_flip_h(bool p_flip) {
|
if (hflip == p_flip) {
return;
}
|
hflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_h() const {
return hflip;
}
void AnimatedSprite2D::set_flip_v(bool p_flip) {
if (vflip == p_flip) {
return;
}
vflip = p_flip;
queue_redraw();
}
bool AnimatedSprite2D::is_flipped_v() const {
return vflip;
}
void AnimatedSprite2D::_res_changed() {
set_frame_and_progress(frame, frame_progress);
queue_redraw();
notify_property_list_changed();
}
bool AnimatedSprite2D::is_playing() const {
return playing;
}
void AnimatedSprite2D::set_autoplay(const String &p_name) {
if (is_inside_tree() && !Engine::get_singleton()->is_editor_hint()) {
WARN_PRINT("Setting autoplay after the node has been added to the scene
|
ast_based
|
<fim_prefix> // load prompts
std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
if (positive_prompts.size() != negative_prompts.size()) {
fprintf(stderr, "number of positive and negative prompts must be equal\n");
return 1;
}
if (positive_prompts.empty()) {
fprintf(stderr, "must provide at least one prompt pair\n");
return 1;
}
ctx_train.positive_entries = positive_prompts;
ctx_train.negative_entries = negative_prompts;
return 0;
}
int main(int argc, char ** argv) {<fim_suffix>
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) {
return 1;
}
if (params.n_pca_iterations % params.n_pca_batch != 0) {
fprintf(stderr, "PCA iterations must by multiply of PCA batch size\n");
return 1;
}
callback_data cb_data;
// pass the callback to the backend scheduler
// it will be executed for each node during the graph computation
params.cb_eval = cb_eval;
params.cb_eval_user_data = &cb_data;
params.warmup = false;
print_build_info();
llama_backend_init();
llama_numa_init(params.numa);
// load the model to get hparams<fim_middle> common_params params;
params.out_file = "control_vector.gguf";<fim_end>
|
// load prompts
std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
if (positive_prompts.size() != negative_prompts.size()) {
fprintf(stderr, "number of positive and negative prompts must be equal\n");
return 1;
}
if (positive_prompts.empty()) {
fprintf(stderr, "must provide at least one prompt pair\n");
return 1;
}
ctx_train.positive_entries = positive_prompts;
ctx_train.negative_entries = negative_prompts;
return 0;
}
int main(int argc, char ** argv) {
|
common_params params;
params.out_file = "control_vector.gguf";
|
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) {
return 1;
}
if (params.n_pca_iterations % params.n_pca_batch != 0) {
fprintf(stderr, "PCA iterations must by multiply of PCA batch size\n");
return 1;
}
callback_data cb_data;
// pass the callback to the backend scheduler
// it will be executed for each node during the graph computation
params.cb_eval = cb_eval;
params.cb_eval_user_data = &cb_data;
params.warmup = false;
print_build_info();
llama_backend_init();
llama_numa_init(params.numa);
// load the model to get hparams
|
random
|
<fim_prefix>).valid_word(word);
}
// Returns true if utf8_character is defined in the UniCharset.
bool TessBaseAPI::IsValidCharacter(const char *utf8_character) const {
return tesseract_->unicharset.contains_unichar(utf8_character);
}
// TODO(rays) Obsolete this function and replace with a more aptly named
// function that returns image coordinates rather than tesseract coordinates.
bool TessBaseAPI::GetTextDirection(int *out_offset, float *out_slope) {
const std::unique_ptr<const PageIterator> it(AnalyseLayout());
if (it == nullptr) {
return false;
}
int x1, x2, y1, y2;
it->Baseline(RIL_TEXTLINE, &x1, &y1, &x2, &y2);
// Calculate offset and slope (NOTE: Kind of ugly)
if (x2 <= x1) <fim_suffix>
// Convert the point pair to slope/offset of the baseline (in image coords.)
*out_slope = static_cast<float>(y2 - y1) / (x2 - x1);
*out_offset = static_cast<int>(y1 - *out_slope * x1);
// Get the y-coord of the baseline at the left and right edges of the
// textline's bounding box.
int left, top, right, bottom;
if (!it->BoundingBox(RIL_TEXTLINE, &left, &top, &right, &bottom)) {
return false;
}
int left_y = IntCastRounded(*out_slope * left + *out_offset);
int right_y = IntCastRounded(*out_slope * right + *out_offset);
// Shift the baseline down so it passes through the nearest bottom-corner
// of the textline's bounding box. This is the difference between the y
<fim_middle>{
x2 = x1 + 1;
}<fim_end>
|
).valid_word(word);
}
// Returns true if utf8_character is defined in the UniCharset.
bool TessBaseAPI::IsValidCharacter(const char *utf8_character) const {
return tesseract_->unicharset.contains_unichar(utf8_character);
}
// TODO(rays) Obsolete this function and replace with a more aptly named
// function that returns image coordinates rather than tesseract coordinates.
bool TessBaseAPI::GetTextDirection(int *out_offset, float *out_slope) {
const std::unique_ptr<const PageIterator> it(AnalyseLayout());
if (it == nullptr) {
return false;
}
int x1, x2, y1, y2;
it->Baseline(RIL_TEXTLINE, &x1, &y1, &x2, &y2);
// Calculate offset and slope (NOTE: Kind of ugly)
if (x2 <= x1)
|
{
x2 = x1 + 1;
}
|
// Convert the point pair to slope/offset of the baseline (in image coords.)
*out_slope = static_cast<float>(y2 - y1) / (x2 - x1);
*out_offset = static_cast<int>(y1 - *out_slope * x1);
// Get the y-coord of the baseline at the left and right edges of the
// textline's bounding box.
int left, top, right, bottom;
if (!it->BoundingBox(RIL_TEXTLINE, &left, &top, &right, &bottom)) {
return false;
}
int left_y = IntCastRounded(*out_slope * left + *out_offset);
int right_y = IntCastRounded(*out_slope * right + *out_offset);
// Shift the baseline down so it passes through the nearest bottom-corner
// of the textline's bounding box. This is the difference between the y
|
ast_based
|
<fim_prefix>r(), "<0x%02hhX>", &byte_val) == 1) {
// Text of byte tokens is already in the expected format.
type = LLAMA_TOKEN_TYPE_BYTE;
} else {
type = LLAMA_TOKEN_TYPE_NORMAL;
}
text = llama_escape_whitespaces(text);
vocab->id_to_token[id].text = text;
vocab->id_to_token[id].score = score;
vocab->id_to_token[id].type = type;
vocab->token_to_id.emplace(text, id);
}
}
}
static void convert_weights_ak_to_gg(struct ggml_tensor * gg_weights, const float * karpathy_weights) {
int size = 1;
for (int dim = 0; dim < ggml_n_dims(gg_weights); ++dim) {
<fim_suffix>
}
for (int ct = 0; ct < size; ++ct) {
int64_t i0 = 0; int64_t i1 = 0;
int64_t i2 = 0; int64_t i3 = 0;
ggml_unravel_index(gg_weights, ct, &i0, &i1, &i2, &i3);
ggml_set_f32_nd(gg_weights, i0, i1, i2, i3, karpathy_weights[ct]);
}
}
static void save_as_llama_model(
struct my_llama_vocab * vocab, struct my_llama_model * model, TransformerWeights* w, const char * filename
) {
// convert AK weights into GG weights one by one.
// w->token_embedding_table -> model->tok_embeddings
// float* -> struct ggml_tensor
convert_weights_ak_to_gg(model->tok_embeddings, w->token_embedding_table.data());
convert_weights_ak_to_<fim_middle>size *= gg_weights->ne[dim];<fim_end>
|
r(), "<0x%02hhX>", &byte_val) == 1) {
// Text of byte tokens is already in the expected format.
type = LLAMA_TOKEN_TYPE_BYTE;
} else {
type = LLAMA_TOKEN_TYPE_NORMAL;
}
text = llama_escape_whitespaces(text);
vocab->id_to_token[id].text = text;
vocab->id_to_token[id].score = score;
vocab->id_to_token[id].type = type;
vocab->token_to_id.emplace(text, id);
}
}
}
static void convert_weights_ak_to_gg(struct ggml_tensor * gg_weights, const float * karpathy_weights) {
int size = 1;
for (int dim = 0; dim < ggml_n_dims(gg_weights); ++dim) {
|
size *= gg_weights->ne[dim];
|
}
for (int ct = 0; ct < size; ++ct) {
int64_t i0 = 0; int64_t i1 = 0;
int64_t i2 = 0; int64_t i3 = 0;
ggml_unravel_index(gg_weights, ct, &i0, &i1, &i2, &i3);
ggml_set_f32_nd(gg_weights, i0, i1, i2, i3, karpathy_weights[ct]);
}
}
static void save_as_llama_model(
struct my_llama_vocab * vocab, struct my_llama_model * model, TransformerWeights* w, const char * filename
) {
// convert AK weights into GG weights one by one.
// w->token_embedding_table -> model->tok_embeddings
// float* -> struct ggml_tensor
convert_weights_ak_to_gg(model->tok_embeddings, w->token_embedding_table.data());
convert_weights_ak_to_
|
ast_based
|
<fim_prefix>har>(borderValue[i]);
}
break;
case CV_HAL_BORDER_REPLICATE:
ctx->border = CAROTENE_NS::BORDER_MODE_REPLICATE;
break;
case CV_HAL_BORDER_REFLECT:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT;
break;
case CV_HAL_BORDER_WRAP:
ctx->border = CAROTENE_NS::BORDER_MODE_WRAP;
break;
case CV_HAL_BORDER_REFLECT_101:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT101;
break;
default:
delete ctx;
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
*context = (cvhalFilter2D*)(ctx);
return CV_HAL_ERROR_OK;
}
inline int TEGRA_MORPHFREE(cvhalFilter2D *context)
{
if(context)
{
<fim_suffix>
return CV_HAL_ERROR_OK;
}
else
{
return CV_HAL_ERROR_UNKNOWN;
}
}
#define TEGRA_MORPHIMPL(context, src_data, src_step, dst_data, dst_step, width, height, src_full_width, src_full_height, src_roi_x, src_roi_y, dst_full_width, dst_full_height, dst_roi_x, dst_roi_y) \
( \
(void)dst_full_width, (void)dst_full_height, (void)dst_roi_x, (void)dst_roi_y, \
context && CAROTENE_NS::isSupportedConfiguration() ? \
((MorphCtx*)context)->operation == CV_HAL_MORPH_ERODE ? \
CAROTENE_NS::erode(CAROTENE_NS::Size2D(width, height), ((MorphCtx*)context)->channels, \
src_data, src_step, dst_data, dst_step, \
<fim_middle>delete (MorphCtx*)context;<fim_end>
|
har>(borderValue[i]);
}
break;
case CV_HAL_BORDER_REPLICATE:
ctx->border = CAROTENE_NS::BORDER_MODE_REPLICATE;
break;
case CV_HAL_BORDER_REFLECT:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT;
break;
case CV_HAL_BORDER_WRAP:
ctx->border = CAROTENE_NS::BORDER_MODE_WRAP;
break;
case CV_HAL_BORDER_REFLECT_101:
ctx->border = CAROTENE_NS::BORDER_MODE_REFLECT101;
break;
default:
delete ctx;
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
*context = (cvhalFilter2D*)(ctx);
return CV_HAL_ERROR_OK;
}
inline int TEGRA_MORPHFREE(cvhalFilter2D *context)
{
if(context)
{
|
delete (MorphCtx*)context;
|
return CV_HAL_ERROR_OK;
}
else
{
return CV_HAL_ERROR_UNKNOWN;
}
}
#define TEGRA_MORPHIMPL(context, src_data, src_step, dst_data, dst_step, width, height, src_full_width, src_full_height, src_roi_x, src_roi_y, dst_full_width, dst_full_height, dst_roi_x, dst_roi_y) \
( \
(void)dst_full_width, (void)dst_full_height, (void)dst_roi_x, (void)dst_roi_y, \
context && CAROTENE_NS::isSupportedConfiguration() ? \
((MorphCtx*)context)->operation == CV_HAL_MORPH_ERODE ? \
CAROTENE_NS::erode(CAROTENE_NS::Size2D(width, height), ((MorphCtx*)context)->channels, \
src_data, src_step, dst_data, dst_step, \
|
ast_based
|