| text (stringlengths 558-4.54k) | prefix (stringlengths 100-2k) | middle (stringlengths 10-500) | suffix (stringlengths 100-2k) | type (stringclasses, 2 values) |
|---|---|---|---|---|
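Each row below is one fill-in-the-middle (FIM) training example. Judging from the rows themselves, the `text` column appears to be the `prefix`, `suffix`, and `middle` columns joined with the `<|fim_prefix|>`, `<|fim_suffix|>`, and `<|fim_middle|>` sentinel tokens in prefix-suffix-middle order. A minimal Python sketch of that assumed layout follows; the helper name is ours, and this is an illustration of the observed row structure, not part of the dataset itself.

```python
# Sketch only: assumes the prefix-suffix-middle (PSM) layout observed in the
# rows above; the sentinel strings are taken verbatim from the `text` column.
def build_fim_text(prefix: str, middle: str, suffix: str) -> str:
    """Assemble a FIM training string in the same layout as the `text` column."""
    return f"<|fim_prefix|>{prefix}<|fim_suffix|>{suffix}<|fim_middle|>{middle}"

# Round-trip check against the first row: given the surrounding prefix and
# suffix, the target completion after <|fim_middle|> is the `middle` column.
example = build_fim_text(
    prefix="ClassDB::bind_method(",
    middle='D_METHOD("set_sprite_frames", "sprite_frames")',
    suffix=", &AnimatedSprite2D::set_sprite_frames);",
)
assert example.endswith('<|fim_middle|>D_METHOD("set_sprite_frames", "sprite_frames")')
```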
<|fim_prefix|>tion == p_name) {
return;
}
animation = p_name;
emit_signal(SceneStringName(animation_changed));
if (frames.is_null()) {
animation = StringName();
stop();
ERR_FAIL_MSG(vformat("There is no animation with name '%s'.", p_name));
}
int frame_count = frames->get_frame_count(animation);
if (animation == StringName() || frame_count == 0) {
stop();
return;
} else if (!frames->get_animation_names().has(animation)) {
animation = StringName();
stop();
ERR_FAIL_MSG(vformat("There is no animation with name '%s'.", p_name));
}
if (std::signbit(get_playing_speed())) {
set_frame_and_progress(frame_count - 1, 1.0);
} else {
set_frame_and_progress(0, 0.0);
}
notify_property_list_changed();
queue_redraw();
}
StringName AnimatedSprite2D::get_animation() const {
return animation;
}
PackedStringArray AnimatedSprite2D::get_configuration_warnings() const {
PackedStringArray warnings = Node2D::get_configuration_warnings();
if (frames.is_null()) {
warnings.push_back(RTR("A SpriteFrames resource must be created or set in the \"Sprite Frames\" property in order for AnimatedSprite2D to display frames."));
}
return warnings;
}
#ifdef TOOLS_ENABLED
void AnimatedSprite2D::get_argument_options(const StringName &p_function, int p_idx, List<String> *r_options) const {
const String pf = p_function;
if (p_idx == 0 && frames.is_valid()) {
if (pf == "play" || pf == "play_backwards" || pf == "set_animation" || pf == "set_autoplay") {
List<StringName> al;
frames->get_animation_list(&al);
for (const StringName &name : al) {
r_options->push_back(String(name).quote());
}
}
}
Node2D::get_argument_options(p_function, p_idx, r_options);
}
#endif // TOOLS_ENABLED
#ifndef DISABLE_DEPRECATED
bool AnimatedSprite2D::_set(const StringName &p_name, const Variant &p_value) {
if ((p_name == SNAME("frames"))) {
set_sprite_frames(p_value);
return true;
}
return false;
}
#endif
void AnimatedSprite2D::_bind_methods() {
ClassDB::bind_method(<|fim_suffix|>, &AnimatedSprite2D::set_sprite_frames);
ClassDB::bind_method(D_METHOD("get_sprite_frames"), &AnimatedSprite2D::get_sprite_frames);
ClassDB::bind_method(D_METHOD("set_animation", "name"), &AnimatedSprite2D::set_animation);
ClassDB::bind_method(D_METHOD("get_animation"), &AnimatedSprite2D::get_animation);
ClassDB::bind_method(D_METHOD("set_autoplay", "name"), &AnimatedSprite2D::set_autoplay);
ClassDB::bind_method(D_METHOD("get_autoplay"), &AnimatedSprite2D::get_autoplay);
ClassDB::bind_method(D_METHOD("is_playing"), &AnimatedSprite2D::is_playing);
ClassDB::bind_method(D_METHOD("play", "name", "custom_speed", "from_end"), &AnimatedSprite2D::play, DEFVAL(StringName()), DEFVAL(1.0), DEFVAL(false));
ClassDB::bind_method(D_METHOD("play_backwards", "name"), &AnimatedSprite2D::play_backwards, DEFVAL(StringName()));
ClassDB::bind_method(D_METHOD("pause"), &AnimatedSprite2D::pause);
ClassDB::bind_method(D_METHOD("stop"), &AnimatedSprite2D::stop);
ClassDB::bind_method(D_METHOD("set_centered", "centered"), &AnimatedSprite2D::set_centered);
ClassDB::bind_method(D_METHOD("is_centered"), &AnimatedSprite2D::is_centered);
ClassDB::bind_method(D_METHOD("set_offset", "offset"), &AnimatedSprite2D::set_offset);
ClassDB::bind_method(D_METHOD("get_offset"), &AnimatedSprite2D::get_offset);
ClassDB::bind_method(D_METHOD("set_flip_h", "flip_h"), &AnimatedSprite2D::set_flip_h);
ClassDB::bind_method(D_METHOD("is_flipped_h"), &AnimatedSprite2D::is_flipped_h);
ClassDB::bind_method(D_METHOD("set_flip_v", "flip_v"), &AnimatedSprite2D::set_flip_v);
ClassDB::bind_method(D_METHOD("is_flipped_v"), &AnimatedSprite2D::is_flipped_v);
ClassDB::bind_method(D_METHOD("set_frame", "frame"), &AnimatedSprite2D::set_frame);
ClassDB::bind_method(D_METHOD("get_frame"), &AnimatedSprite2D::get_frame);
ClassDB::bind_method(D_METHOD("set_frame_progress", "progress"), &AnimatedSprite2D::set_frame_progress);
ClassDB::bind_method(D_METHOD("get_frame_progress"), &AnimatedSprite2D::get_frame<|fim_middle|>D_METHOD("set_sprite_frames", "sprite_frames")
|
tion == p_name) {
return;
}
animation = p_name;
emit_signal(SceneStringName(animation_changed));
if (frames.is_null()) {
animation = StringName();
stop();
ERR_FAIL_MSG(vformat("There is no animation with name '%s'.", p_name));
}
int frame_count = frames->get_frame_count(animation);
if (animation == StringName() || frame_count == 0) {
stop();
return;
} else if (!frames->get_animation_names().has(animation)) {
animation = StringName();
stop();
ERR_FAIL_MSG(vformat("There is no animation with name '%s'.", p_name));
}
if (std::signbit(get_playing_speed())) {
set_frame_and_progress(frame_count - 1, 1.0);
} else {
set_frame_and_progress(0, 0.0);
}
notify_property_list_changed();
queue_redraw();
}
StringName AnimatedSprite2D::get_animation() const {
return animation;
}
PackedStringArray AnimatedSprite2D::get_configuration_warnings() const {
PackedStringArray warnings = Node2D::get_configuration_warnings();
if (frames.is_null()) {
warnings.push_back(RTR("A SpriteFrames resource must be created or set in the \"Sprite Frames\" property in order for AnimatedSprite2D to display frames."));
}
return warnings;
}
#ifdef TOOLS_ENABLED
void AnimatedSprite2D::get_argument_options(const StringName &p_function, int p_idx, List<String> *r_options) const {
const String pf = p_function;
if (p_idx == 0 && frames.is_valid()) {
if (pf == "play" || pf == "play_backwards" || pf == "set_animation" || pf == "set_autoplay") {
List<StringName> al;
frames->get_animation_list(&al);
for (const StringName &name : al) {
r_options->push_back(String(name).quote());
}
}
}
Node2D::get_argument_options(p_function, p_idx, r_options);
}
#endif // TOOLS_ENABLED
#ifndef DISABLE_DEPRECATED
bool AnimatedSprite2D::_set(const StringName &p_name, const Variant &p_value) {
if ((p_name == SNAME("frames"))) {
set_sprite_frames(p_value);
return true;
}
return false;
}
#endif
void AnimatedSprite2D::_bind_methods() {
ClassDB::bind_method(
|
D_METHOD("set_sprite_frames", "sprite_frames")
|
, &AnimatedSprite2D::set_sprite_frames);
ClassDB::bind_method(D_METHOD("get_sprite_frames"), &AnimatedSprite2D::get_sprite_frames);
ClassDB::bind_method(D_METHOD("set_animation", "name"), &AnimatedSprite2D::set_animation);
ClassDB::bind_method(D_METHOD("get_animation"), &AnimatedSprite2D::get_animation);
ClassDB::bind_method(D_METHOD("set_autoplay", "name"), &AnimatedSprite2D::set_autoplay);
ClassDB::bind_method(D_METHOD("get_autoplay"), &AnimatedSprite2D::get_autoplay);
ClassDB::bind_method(D_METHOD("is_playing"), &AnimatedSprite2D::is_playing);
ClassDB::bind_method(D_METHOD("play", "name", "custom_speed", "from_end"), &AnimatedSprite2D::play, DEFVAL(StringName()), DEFVAL(1.0), DEFVAL(false));
ClassDB::bind_method(D_METHOD("play_backwards", "name"), &AnimatedSprite2D::play_backwards, DEFVAL(StringName()));
ClassDB::bind_method(D_METHOD("pause"), &AnimatedSprite2D::pause);
ClassDB::bind_method(D_METHOD("stop"), &AnimatedSprite2D::stop);
ClassDB::bind_method(D_METHOD("set_centered", "centered"), &AnimatedSprite2D::set_centered);
ClassDB::bind_method(D_METHOD("is_centered"), &AnimatedSprite2D::is_centered);
ClassDB::bind_method(D_METHOD("set_offset", "offset"), &AnimatedSprite2D::set_offset);
ClassDB::bind_method(D_METHOD("get_offset"), &AnimatedSprite2D::get_offset);
ClassDB::bind_method(D_METHOD("set_flip_h", "flip_h"), &AnimatedSprite2D::set_flip_h);
ClassDB::bind_method(D_METHOD("is_flipped_h"), &AnimatedSprite2D::is_flipped_h);
ClassDB::bind_method(D_METHOD("set_flip_v", "flip_v"), &AnimatedSprite2D::set_flip_v);
ClassDB::bind_method(D_METHOD("is_flipped_v"), &AnimatedSprite2D::is_flipped_v);
ClassDB::bind_method(D_METHOD("set_frame", "frame"), &AnimatedSprite2D::set_frame);
ClassDB::bind_method(D_METHOD("get_frame"), &AnimatedSprite2D::get_frame);
ClassDB::bind_method(D_METHOD("set_frame_progress", "progress"), &AnimatedSprite2D::set_frame_progress);
ClassDB::bind_method(D_METHOD("get_frame_progress"), &AnimatedSprite2D::get_frame
|
ast_based
|
<|fim_prefix|>IdT {
llvm::SmallVector<ElementType> block(llvm::map_range(Get(id), transform));
return Add(block);
}
// Adds a block or finds an existing canonical block with the given content,
// and returns an ID to reference it.
auto AddCanonical(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
auto result = canonical_blocks_.Insert(
content, [&] { return Add(content); }, KeyContext(this));
return result.key();
}
// Promotes an existing block ID to a canonical block ID, or returns an
// existing canonical block ID if the block was already added. The specified
// block must not be modified after this point.
auto MakeCanonical(IdT id) -> IdT {
// Get the content first so that we don't have unnecessary translation of
// the `id` into the content during insertion.
auto result = canonical_blocks_.Insert(
Get(id), [id] { return id; }, KeyContext(this));
return result.key();
}
auto OutputYaml() const -> Yaml::OutputMapping {
return Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
for (auto [block_id, block] : values_.enumerate()) {
map.Add(PrintToString(block_id),
Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
for (auto [i, elem_id] : llvm::enumerate(block)) {
map.Add(llvm::itostr(i), Yaml::OutputScalar(elem_id));
}
}));
}
});
}
// Collects memory usage of members.
auto CollectMemUsage(MemUsage& mem_usage, llvm::StringRef label) const
-> void {
mem_usage.Collect(MemUsage::ConcatLabel(label, "values_"), values_);
mem_usage.Collect(MemUsage::ConcatLabel(label, "canonical_blocks_"),
canonical_blocks_, KeyContext(this));
}
auto size() const -> int { return values_.size(); }
protected:
// Allocates a copy of the given data using our slab allocator.
auto AllocateCopy(ConstRefType data) -> RefType <|fim_suffix|>
return result;
}
// Allocates an uninitialized array using our slab allocator.
auto AllocateUninitialized(size_t size) -> RefType {
// We're not going to run a destructor, so ensure that's OK.
static_assert(std::is_trivially_destructible_v<ElementType>);
auto storage = static_cast<ElementType*>(
allocator_->Allocate(size * sizeof(ElementType), alignof(ElementType)));
return RefType(storage, size);
}
// Allow children to have more complex value handling.
auto values() -> ValueStore<IdT, RefType>& { return values_; }
private:
class KeyContext;
llvm::BumpPtrAllocator* allocator_;
ValueStore<IdT, RefType> values_;
Set<IdT, /*SmallSize=*/0, KeyContext> canonical_blocks_;
};
template <typename IdT, typename ElementT>
class BlockValueStore<IdT, ElementT>::KeyContext
: public TranslatingKeyContext<KeyContext> {
public:
explicit KeyContext(const BlockValueStore* store) : store_(store) {}
auto TranslateKey(IdT id) const -> ConstRefType { return store_->Get(id); }
private:
const BlockValueStore* store_;
};
} // namespace Carbon::SemIR
#endif // CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
<|fim_middle|>{
auto result = AllocateUninitialized(data.size());
std::uninitialized_copy(data.begin(), data.end(), result.begin());
|
IdT {
llvm::SmallVector<ElementType> block(llvm::map_range(Get(id), transform));
return Add(block);
}
// Adds a block or finds an existing canonical block with the given content,
// and returns an ID to reference it.
auto AddCanonical(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
auto result = canonical_blocks_.Insert(
content, [&] { return Add(content); }, KeyContext(this));
return result.key();
}
// Promotes an existing block ID to a canonical block ID, or returns an
// existing canonical block ID if the block was already added. The specified
// block must not be modified after this point.
auto MakeCanonical(IdT id) -> IdT {
// Get the content first so that we don't have unnecessary translation of
// the `id` into the content during insertion.
auto result = canonical_blocks_.Insert(
Get(id), [id] { return id; }, KeyContext(this));
return result.key();
}
auto OutputYaml() const -> Yaml::OutputMapping {
return Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
for (auto [block_id, block] : values_.enumerate()) {
map.Add(PrintToString(block_id),
Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
for (auto [i, elem_id] : llvm::enumerate(block)) {
map.Add(llvm::itostr(i), Yaml::OutputScalar(elem_id));
}
}));
}
});
}
// Collects memory usage of members.
auto CollectMemUsage(MemUsage& mem_usage, llvm::StringRef label) const
-> void {
mem_usage.Collect(MemUsage::ConcatLabel(label, "values_"), values_);
mem_usage.Collect(MemUsage::ConcatLabel(label, "canonical_blocks_"),
canonical_blocks_, KeyContext(this));
}
auto size() const -> int { return values_.size(); }
protected:
// Allocates a copy of the given data using our slab allocator.
auto AllocateCopy(ConstRefType data) -> RefType
|
{
auto result = AllocateUninitialized(data.size());
std::uninitialized_copy(data.begin(), data.end(), result.begin());
|
return result;
}
// Allocates an uninitialized array using our slab allocator.
auto AllocateUninitialized(size_t size) -> RefType {
// We're not going to run a destructor, so ensure that's OK.
static_assert(std::is_trivially_destructible_v<ElementType>);
auto storage = static_cast<ElementType*>(
allocator_->Allocate(size * sizeof(ElementType), alignof(ElementType)));
return RefType(storage, size);
}
// Allow children to have more complex value handling.
auto values() -> ValueStore<IdT, RefType>& { return values_; }
private:
class KeyContext;
llvm::BumpPtrAllocator* allocator_;
ValueStore<IdT, RefType> values_;
Set<IdT, /*SmallSize=*/0, KeyContext> canonical_blocks_;
};
template <typename IdT, typename ElementT>
class BlockValueStore<IdT, ElementT>::KeyContext
: public TranslatingKeyContext<KeyContext> {
public:
explicit KeyContext(const BlockValueStore* store) : store_(store) {}
auto TranslateKey(IdT id) const -> ConstRefType { return store_->Get(id); }
private:
const BlockValueStore* store_;
};
} // namespace Carbon::SemIR
#endif // CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
|
ast_based
|
<|fim_prefix|> show this help message and exit\n");
fprintf(stderr, " --copy-vocab-from-model FNAME path of gguf llama model or llama2.c vocabulary from which to copy vocab (default '%s')\n", params->fn_vocab_model);
fprintf(stderr, " --llama2c-model FNAME [REQUIRED] model path from which to load Karpathy's llama2.c model\n");
fprintf(stderr, " --llama2c-output-model FNAME model path to save the converted llama2.c model (default %s')\n", params->fn_llama2c_output_model);
fprintf(stderr, "\n");
}
static bool params_parse(int argc, char ** argv, struct train_params * params) {
bool invalid_param = false;
bool reqd_param_found = false;
std::string arg;
struct train_params default_params = get_default_train_params();
const std::string arg_prefix = "--";
for (int i = 1; i < argc; i++) {
arg = argv[i];
if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
std::replace(arg.begin(), arg.end(), '_', '-');
}
if (arg == "--copy-vocab-from-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
params->fn_vocab_model = argv[i];
} else if (arg == "--llama2c-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
reqd_param_found = true;
params->fn_llama2c_model = argv[i];
} else if (arg == "--llama2c-output-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
params->fn_llama2c_output_model = argv[i];
} else if (arg == "-h" || arg == "--help") {
print_usage(argc, argv, &default_params);
exit(0);
} else {
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
print_usage(argc, argv, &default_params);
exit(1);
}
}
if (invalid_param) {
<|fim_suffix|>
print_usage(argc, argv, &default_params);
exit(1);
}
if (!reqd_param_found){
fprintf(stderr, "error: please specify a llama2.c .bin file to be converted with argument --llama2c-model\n");
print_usage(argc, argv, &default_params);
exit(1);
}
return true;
}
static std::string basename(const std::string &path) {
size_t pos = path.find_last_of("/\\");
if (pos == std::string::npos) {
return path;
}
return path.substr(pos + 1);
}
int main(int argc, char ** argv) {
common_init();
struct train_params params = get_default_train_params();
if (!params_parse(argc, argv, &params)) {
return 1;
}
Config config;
TransformerWeights weights = {};
{
LOG_INF("%s: Loading llama2c model from %s\n", __func__, params.fn_llama2c_model);
FILE * file = fopen(params.fn_llama2c_model, "rb");
if (!file) {
LOG_ERR("%s: Unable to open the checkpoint file %s!\n", __func__, params.fn_llama2c_model);
return 1;
}
// read in the config header
if (fread(&config, sizeof(Config), 1, file) != 1) {
LOG_ERR("%s: Unable to read llama2c config from %s!\n",__func__,params.fn_llama2c_model);
return 1;
}
auto shared_weights = config.vocab_size > 0;
config.vocab_size = abs(config.vocab_size);
// read in the Transformer weights
alloc_weights(&weights, &config, shared_weights);
if (checkpoint_init_weights(&weights, &config, file, shared_weights)) {
LOG_ERR("%s: Unable to initialize transformer weights from %s!",__func__,params.fn_llama2c_model);
return 1;
}
fclose(file);
}
struct my_llama_vocab vocab;
load_vocab(params.fn_vocab_model, &config, &vocab);
struct my_llama_model model;
model.hparams.n_vocab = config.vocab_size; //llama_vocab_n_vocab(lctx);
model.hparams.n_ctx = params.n_c<|fim_middle|>fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
|
show this help message and exit\n");
fprintf(stderr, " --copy-vocab-from-model FNAME path of gguf llama model or llama2.c vocabulary from which to copy vocab (default '%s')\n", params->fn_vocab_model);
fprintf(stderr, " --llama2c-model FNAME [REQUIRED] model path from which to load Karpathy's llama2.c model\n");
fprintf(stderr, " --llama2c-output-model FNAME model path to save the converted llama2.c model (default %s')\n", params->fn_llama2c_output_model);
fprintf(stderr, "\n");
}
static bool params_parse(int argc, char ** argv, struct train_params * params) {
bool invalid_param = false;
bool reqd_param_found = false;
std::string arg;
struct train_params default_params = get_default_train_params();
const std::string arg_prefix = "--";
for (int i = 1; i < argc; i++) {
arg = argv[i];
if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
std::replace(arg.begin(), arg.end(), '_', '-');
}
if (arg == "--copy-vocab-from-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
params->fn_vocab_model = argv[i];
} else if (arg == "--llama2c-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
reqd_param_found = true;
params->fn_llama2c_model = argv[i];
} else if (arg == "--llama2c-output-model") {
if (++i >= argc) {
invalid_param = true;
break;
}
params->fn_llama2c_output_model = argv[i];
} else if (arg == "-h" || arg == "--help") {
print_usage(argc, argv, &default_params);
exit(0);
} else {
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
print_usage(argc, argv, &default_params);
exit(1);
}
}
if (invalid_param) {
|
fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
|
print_usage(argc, argv, &default_params);
exit(1);
}
if (!reqd_param_found){
fprintf(stderr, "error: please specify a llama2.c .bin file to be converted with argument --llama2c-model\n");
print_usage(argc, argv, &default_params);
exit(1);
}
return true;
}
static std::string basename(const std::string &path) {
size_t pos = path.find_last_of("/\\");
if (pos == std::string::npos) {
return path;
}
return path.substr(pos + 1);
}
int main(int argc, char ** argv) {
common_init();
struct train_params params = get_default_train_params();
if (!params_parse(argc, argv, &params)) {
return 1;
}
Config config;
TransformerWeights weights = {};
{
LOG_INF("%s: Loading llama2c model from %s\n", __func__, params.fn_llama2c_model);
FILE * file = fopen(params.fn_llama2c_model, "rb");
if (!file) {
LOG_ERR("%s: Unable to open the checkpoint file %s!\n", __func__, params.fn_llama2c_model);
return 1;
}
// read in the config header
if (fread(&config, sizeof(Config), 1, file) != 1) {
LOG_ERR("%s: Unable to read llama2c config from %s!\n",__func__,params.fn_llama2c_model);
return 1;
}
auto shared_weights = config.vocab_size > 0;
config.vocab_size = abs(config.vocab_size);
// read in the Transformer weights
alloc_weights(&weights, &config, shared_weights);
if (checkpoint_init_weights(&weights, &config, file, shared_weights)) {
LOG_ERR("%s: Unable to initialize transformer weights from %s!",__func__,params.fn_llama2c_model);
return 1;
}
fclose(file);
}
struct my_llama_vocab vocab;
load_vocab(params.fn_vocab_model, &config, &vocab);
struct my_llama_model model;
model.hparams.n_vocab = config.vocab_size; //llama_vocab_n_vocab(lctx);
model.hparams.n_ctx = params.n_c
|
ast_based
|
<|fim_prefix|> draw_texture(solo, solo_rect.position);
RBMap<int, Rect2> track_icons;
track_icons[REMOVE_ICON] = remove_rect;
track_icons[LOCK_ICON] = lock_rect;
track_icons[VISIBILITY_ICON] = visible_rect;
track_icons[SOLO_ICON] = solo_rect;
subtrack_icons[current_track] = track_icons;
vofs += text_buf.get_size().y + v_separation;
track_v_scroll_max += text_buf.get_size().y + v_separation;
}
}
const Color accent = get_theme_color(SNAME("accent_color"), EditorStringName(Editor));
// Guides.
{
float min_left_scale = font->get_height(font_size) + v_separation;
float scale = (min_left_scale * 2) * timeline_v_zoom;
float step = Math::pow(10.0, Math::round(Math::log(scale / 5.0) / Math::log(10.0))) * 5.0;
scale = Math::snapped(scale, step);
while (scale / timeline_v_zoom < min_left_scale * 2) {
scale += step;
}
bool first = true;
int prev_iv = 0;
for (int i = font->get_height(font_size); i < get_size().height; i++) {
float ofs = get_size().height / 2.0 - i;
ofs *= timeline_v_zoom;
ofs += timeline_v_scroll;
int iv = int(ofs / scale);
if (ofs < 0) {
iv -= 1;
}
if (!first && iv != prev_iv) {
Color lc = h_line_color;
lc.a *= 0.5;
draw_line(Point2(limit, i), Point2(right_limit, i), lc, Math::round(EDSCALE));
Color c = color;
c.a *= 0.5;
draw_string(font, Point2(limit + 8, i - 2), TS->format_number(rtos(Math::snapped((iv + 1) * scale, step))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, c);
}
first = false;
prev_iv = iv;
}
}
// Draw other curves.
{
float scale = timeline->get_zoom_scale();
Ref<Texture2D> point = get_editor_theme_icon(SNAME("KeyValue"));
for (const KeyValue<int, Color> &E : subtrack_colors) {
if (hidden_tracks.has(E.key)) {
continue;
}
_draw_track(E.key, E.value);
for (int i = 0; i < animation->track_get_key_count(E.key); i++) <|fim_suffix|>
}
if (track_count > 0 && !hidden_tracks.has(selected_track)) {
// Draw edited curve.
_draw_track(selected_track, selected_track_color);
}
}
const bool draw_selection_handles = selection.size() > 1;
LocalVector<Point2> selected_pos;
// Draw editor handles.
{
edit_points.clear();
float scale = timeline->get_zoom_scale();
for (int i = 0; i < track_count; ++i) {
bool draw_track = _is_track_curves_displayed(i) && !locked_tracks.has(i);
if (!draw_selection_handles && !draw_track) {
continue;
}
int key_count = animation->track_get_key_count(i);
for (int j = 0; j < key_count; ++j) {
float offset = animation->track_get_key_time(i, j);
float value = animation->bezier_track_get_key_value(i, j);
bool is_selected = selection.has(IntPair(i, j));
if (is_selected) {
if (moving_selection) {
offset += moving_selection_offset.x;
value += moving_selection_offset.y;
} else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
value += -scaling_selection_offset.y + (value - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (draw_selection_handles && is_selected) {
selected_pos.push_back(pos);
}
if (!draw_track) {
continue;
}
Vector2 in_vec = animation->bezier_track_get_key_in_handle(i, j);
Vector2 out_vec = animation->bezier_track_get_key_out_handle(i, j);
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
in_vec = moving_handle_left;
}
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
out_vec = moving_handle_right;
}
if<|fim_middle|>{
float offset = animation->track_get_key_time(E.key, i);
float value = animation->bezier_track_get_key_value(E.key, i);
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (pos.x >= limit && pos.x <= right_limit) {
draw_texture(point, pos - point->get_size() / 2.0, E.value);
}
}
|
draw_texture(solo, solo_rect.position);
RBMap<int, Rect2> track_icons;
track_icons[REMOVE_ICON] = remove_rect;
track_icons[LOCK_ICON] = lock_rect;
track_icons[VISIBILITY_ICON] = visible_rect;
track_icons[SOLO_ICON] = solo_rect;
subtrack_icons[current_track] = track_icons;
vofs += text_buf.get_size().y + v_separation;
track_v_scroll_max += text_buf.get_size().y + v_separation;
}
}
const Color accent = get_theme_color(SNAME("accent_color"), EditorStringName(Editor));
// Guides.
{
float min_left_scale = font->get_height(font_size) + v_separation;
float scale = (min_left_scale * 2) * timeline_v_zoom;
float step = Math::pow(10.0, Math::round(Math::log(scale / 5.0) / Math::log(10.0))) * 5.0;
scale = Math::snapped(scale, step);
while (scale / timeline_v_zoom < min_left_scale * 2) {
scale += step;
}
bool first = true;
int prev_iv = 0;
for (int i = font->get_height(font_size); i < get_size().height; i++) {
float ofs = get_size().height / 2.0 - i;
ofs *= timeline_v_zoom;
ofs += timeline_v_scroll;
int iv = int(ofs / scale);
if (ofs < 0) {
iv -= 1;
}
if (!first && iv != prev_iv) {
Color lc = h_line_color;
lc.a *= 0.5;
draw_line(Point2(limit, i), Point2(right_limit, i), lc, Math::round(EDSCALE));
Color c = color;
c.a *= 0.5;
draw_string(font, Point2(limit + 8, i - 2), TS->format_number(rtos(Math::snapped((iv + 1) * scale, step))), HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, c);
}
first = false;
prev_iv = iv;
}
}
// Draw other curves.
{
float scale = timeline->get_zoom_scale();
Ref<Texture2D> point = get_editor_theme_icon(SNAME("KeyValue"));
for (const KeyValue<int, Color> &E : subtrack_colors) {
if (hidden_tracks.has(E.key)) {
continue;
}
_draw_track(E.key, E.value);
for (int i = 0; i < animation->track_get_key_count(E.key); i++)
|
{
float offset = animation->track_get_key_time(E.key, i);
float value = animation->bezier_track_get_key_value(E.key, i);
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (pos.x >= limit && pos.x <= right_limit) {
draw_texture(point, pos - point->get_size() / 2.0, E.value);
}
}
|
}
if (track_count > 0 && !hidden_tracks.has(selected_track)) {
// Draw edited curve.
_draw_track(selected_track, selected_track_color);
}
}
const bool draw_selection_handles = selection.size() > 1;
LocalVector<Point2> selected_pos;
// Draw editor handles.
{
edit_points.clear();
float scale = timeline->get_zoom_scale();
for (int i = 0; i < track_count; ++i) {
bool draw_track = _is_track_curves_displayed(i) && !locked_tracks.has(i);
if (!draw_selection_handles && !draw_track) {
continue;
}
int key_count = animation->track_get_key_count(i);
for (int j = 0; j < key_count; ++j) {
float offset = animation->track_get_key_time(i, j);
float value = animation->bezier_track_get_key_value(i, j);
bool is_selected = selection.has(IntPair(i, j));
if (is_selected) {
if (moving_selection) {
offset += moving_selection_offset.x;
value += moving_selection_offset.y;
} else if (scaling_selection) {
offset += -scaling_selection_offset.x + (offset - scaling_selection_pivot.x) * (scaling_selection_scale.x - 1);
value += -scaling_selection_offset.y + (value - scaling_selection_pivot.y) * (scaling_selection_scale.y - 1);
}
}
Vector2 pos((offset - timeline->get_value()) * scale + limit, _bezier_h_to_pixel(value));
if (draw_selection_handles && is_selected) {
selected_pos.push_back(pos);
}
if (!draw_track) {
continue;
}
Vector2 in_vec = animation->bezier_track_get_key_in_handle(i, j);
Vector2 out_vec = animation->bezier_track_get_key_out_handle(i, j);
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
in_vec = moving_handle_left;
}
if ((moving_handle == 1 || moving_handle == -1) && moving_handle_track == i && moving_handle_key == j) {
out_vec = moving_handle_right;
}
if
|
ast_based
|
<|fim_prefix|>ice = word->best_choice;
int w_conf = static_cast<int>(100 + 5 * choice->certainty());
// This is the eq for converting Tesseract confidence to 1..100
if (w_conf < 0) {
w_conf = 0;
}
if (w_conf > 100) {
w_conf = 100;
}
conf[n_word++] = w_conf;
}
conf[n_word] = -1;
return conf;
}
#ifndef DISABLED_LEGACY_ENGINE
/**
* Applies the given word to the adaptive classifier if possible.
* The word must be SPACE-DELIMITED UTF-8 - l i k e t h i s , so it can
* tell the boundaries of the graphemes.
* Assumes that SetImage/SetRectangle have been used to set the image
* to the given word. The mode arg should be PSM_SINGLE_WORD or
* PSM_CIRCLE_WORD, as that will be used to control layout analysis.
* The currently set PageSegMode is preserved.
* Returns false if adaption was not possible for some reason.
*/
bool TessBaseAPI::AdaptToWordStr(PageSegMode mode, const char *wordstr) {
int debug = 0;
GetIntVariable("applybox_debug", &debug);
bool success = true;
PageSegMode current_psm = GetPageSegMode();
SetPageSegMode(mode);
SetVariable("classify_enable_learning", "0");
const std::unique_ptr<const char[]> text(GetUTF8Text());
if (debug) {
tprintf("Trying to adapt \"%s\" to \"%s\"\n", text.get(), wordstr);
}
if (text != nullptr) {
PAGE_RES_IT it(page_res_);
WERD_RES *word_res = it.word();
if (word_res != nullptr) {
word_res->word->set_text(wordstr);
// Check to see if text matches wordstr.
int w = 0;
int t;
for (t = 0; text[t] != '\0'; ++t) {
if (text[t] == '\n' || text[t] == ' ') {
continue;
}
while (wordstr[w] == ' ') {
++w;
}
if (text[t] != wordstr[w]) {
break;
}
++w;
}
if (text[t] != '\0' || wordstr[w] != '\0') {
// No match.
delete page_res_;
std::vector<TBOX> boxes;
page_res_ = tesseract_->SetupApplyBoxes(boxes, block_list_);
<|fim_suffix|>
if (pr_it.word() == nullptr) {
success = false;
} else {
word_res = pr_it.word();
}
} else {
word_res->BestChoiceToCorrectText();
}
if (success) {
tesseract_->EnableLearning = true;
tesseract_->LearnWord(nullptr, word_res);
}
} else {
success = false;
}
} else {
success = false;
}
SetPageSegMode(current_psm);
return success;
}
#endif // ndef DISABLED_LEGACY_ENGINE
/**
* Free up recognition results and any stored image data, without actually
* freeing any recognition data that would be time-consuming to reload.
* Afterwards, you must call SetImage or TesseractRect before doing
* any Recognize or Get* operation.
*/
void TessBaseAPI::Clear() {
if (thresholder_ != nullptr) {
thresholder_->Clear();
}
ClearResults();
if (tesseract_ != nullptr) {
SetInputImage(nullptr);
}
}
/**
* Close down tesseract and free up all memory. End() is equivalent to
* destructing and reconstructing your TessBaseAPI.
* Once End() has been used, none of the other API functions may be used
* other than Init and anything declared above it in the class definition.
*/
void TessBaseAPI::End() {
Clear();
delete thresholder_;
thresholder_ = nullptr;
delete page_res_;
page_res_ = nullptr;
delete block_list_;
block_list_ = nullptr;
if (paragraph_models_ != nullptr) {
for (auto model : *paragraph_models_) {
delete model;
}
delete paragraph_models_;
paragraph_models_ = nullptr;
}
#ifndef DISABLED_LEGACY_ENGINE
if (osd_tesseract_ == tesseract_) {
osd_tesseract_ = nullptr;
}
delete osd_tesseract_;
osd_tesseract_ = nullptr;
delete equ_detect_;
equ_detect_ = nullptr;
#endif // ndef DISABLED_LEGACY_ENGINE
delete tesseract_;
tesseract_ = nullptr;
input_file_.clear();
output_file_.clear();
datapath_.clear();
language_.clear();
}
// Clear any library-level memory caches.
// There are a variety of ex<|fim_middle|>tesseract_->ReSegmentByClassification(page_res_);
tesseract_->TidyUp(page_res_);
PAGE_RES_IT pr_it(page_res_);
|
ice = word->best_choice;
int w_conf = static_cast<int>(100 + 5 * choice->certainty());
// This is the eq for converting Tesseract confidence to 1..100
if (w_conf < 0) {
w_conf = 0;
}
if (w_conf > 100) {
w_conf = 100;
}
conf[n_word++] = w_conf;
}
conf[n_word] = -1;
return conf;
}
#ifndef DISABLED_LEGACY_ENGINE
/**
* Applies the given word to the adaptive classifier if possible.
* The word must be SPACE-DELIMITED UTF-8 - l i k e t h i s , so it can
* tell the boundaries of the graphemes.
* Assumes that SetImage/SetRectangle have been used to set the image
* to the given word. The mode arg should be PSM_SINGLE_WORD or
* PSM_CIRCLE_WORD, as that will be used to control layout analysis.
* The currently set PageSegMode is preserved.
* Returns false if adaption was not possible for some reason.
*/
bool TessBaseAPI::AdaptToWordStr(PageSegMode mode, const char *wordstr) {
int debug = 0;
GetIntVariable("applybox_debug", &debug);
bool success = true;
PageSegMode current_psm = GetPageSegMode();
SetPageSegMode(mode);
SetVariable("classify_enable_learning", "0");
const std::unique_ptr<const char[]> text(GetUTF8Text());
if (debug) {
tprintf("Trying to adapt \"%s\" to \"%s\"\n", text.get(), wordstr);
}
if (text != nullptr) {
PAGE_RES_IT it(page_res_);
WERD_RES *word_res = it.word();
if (word_res != nullptr) {
word_res->word->set_text(wordstr);
// Check to see if text matches wordstr.
int w = 0;
int t;
for (t = 0; text[t] != '\0'; ++t) {
if (text[t] == '\n' || text[t] == ' ') {
continue;
}
while (wordstr[w] == ' ') {
++w;
}
if (text[t] != wordstr[w]) {
break;
}
++w;
}
if (text[t] != '\0' || wordstr[w] != '\0') {
// No match.
delete page_res_;
std::vector<TBOX> boxes;
page_res_ = tesseract_->SetupApplyBoxes(boxes, block_list_);
|
tesseract_->ReSegmentByClassification(page_res_);
tesseract_->TidyUp(page_res_);
PAGE_RES_IT pr_it(page_res_);
|
if (pr_it.word() == nullptr) {
success = false;
} else {
word_res = pr_it.word();
}
} else {
word_res->BestChoiceToCorrectText();
}
if (success) {
tesseract_->EnableLearning = true;
tesseract_->LearnWord(nullptr, word_res);
}
} else {
success = false;
}
} else {
success = false;
}
SetPageSegMode(current_psm);
return success;
}
#endif // ndef DISABLED_LEGACY_ENGINE
/**
* Free up recognition results and any stored image data, without actually
* freeing any recognition data that would be time-consuming to reload.
* Afterwards, you must call SetImage or TesseractRect before doing
* any Recognize or Get* operation.
*/
void TessBaseAPI::Clear() {
if (thresholder_ != nullptr) {
thresholder_->Clear();
}
ClearResults();
if (tesseract_ != nullptr) {
SetInputImage(nullptr);
}
}
/**
* Close down tesseract and free up all memory. End() is equivalent to
* destructing and reconstructing your TessBaseAPI.
* Once End() has been used, none of the other API functions may be used
* other than Init and anything declared above it in the class definition.
*/
void TessBaseAPI::End() {
Clear();
delete thresholder_;
thresholder_ = nullptr;
delete page_res_;
page_res_ = nullptr;
delete block_list_;
block_list_ = nullptr;
if (paragraph_models_ != nullptr) {
for (auto model : *paragraph_models_) {
delete model;
}
delete paragraph_models_;
paragraph_models_ = nullptr;
}
#ifndef DISABLED_LEGACY_ENGINE
if (osd_tesseract_ == tesseract_) {
osd_tesseract_ = nullptr;
}
delete osd_tesseract_;
osd_tesseract_ = nullptr;
delete equ_detect_;
equ_detect_ = nullptr;
#endif // ndef DISABLED_LEGACY_ENGINE
delete tesseract_;
tesseract_ = nullptr;
input_file_.clear();
output_file_.clear();
datapath_.clear();
language_.clear();
}
// Clear any library-level memory caches.
// There are a variety of ex
|
ast_based
|
<|fim_prefix|>ut_file);
fclose(training_output_file);
#endif // ndef DISABLED_LEGACY_ENGINE
} else {
// Now run the main recognition.
bool wait_for_text = true;
GetBoolVariable("paragraph_text_based", &wait_for_text);
if (!wait_for_text) {
DetectParagraphs(false);
}
if (tesseract_->recog_all_words(page_res_, monitor, nullptr, nullptr, 0)) {
if (wait_for_text) {
DetectParagraphs(true);
}
} else {
result = -1;
}
}
return result;
}
// Takes ownership of the input pix.
void TessBaseAPI::SetInputImage(Pix *pix) {
tesseract_->set_pix_original(pix);
}
Pix *TessBaseAPI::GetInputImage() {
return tesseract_->pix_original();
}
const char *TessBaseAPI::GetInputName() {
if (!input_file_.empty()) {
return input_file_.c_str();
}
return nullptr;
}
const char *TessBaseAPI::GetDatapath() {
return tesseract_->datadir.c_str();
}
int TessBaseAPI::GetSourceYResolution() {
if (thresholder_ == nullptr)
return -1;
return thresholder_->GetSourceYResolution();
}
// If flist exists, get data from there. Otherwise get data from buf.
// Seems convoluted, but is the easiest way I know of to meet multiple
// goals. Support streaming from stdin, and also work on platforms
// lacking fmemopen.
// TODO: check different logic for flist/buf and simplify.
bool TessBaseAPI::ProcessPagesFileList(FILE *flist, std::string *buf, const char *retry_config,
int timeout_millisec, TessResultRenderer *renderer,
int tessedit_page_number) {
if (!flist && !buf) {
return false;
}
unsigned page = (tessedit_page_number >= 0) ? tessedit_page_number : 0;
char pagename[MAX_PATH];
std::vector<std::string> lines;
if (!flist) {
std::string line;
for (const auto ch : *buf) {
if (ch == '\n') {
lines.push_back(line);
line.clear();
} else {
line.push_back(ch);
}
}
if (!line.empty()) {
<|fim_suffix|>
}
if (lines.empty()) {
return false;
}
}
// Skip to the requested page number.
for (unsigned i = 0; i < page; i++) {
if (flist) {
if (fgets(pagename, sizeof(pagename), flist) == nullptr) {
break;
}
}
}
// Begin producing output
if (renderer && !renderer->BeginDocument(document_title.c_str())) {
return false;
}
// Loop over all pages - or just the requested one
while (true) {
if (flist) {
if (fgets(pagename, sizeof(pagename), flist) == nullptr) {
break;
}
} else {
if (page >= lines.size()) {
break;
}
snprintf(pagename, sizeof(pagename), "%s", lines[page].c_str());
}
chomp_string(pagename);
Pix *pix = pixRead(pagename);
if (pix == nullptr) {
tprintf("Image file %s cannot be read!\n", pagename);
return false;
}
tprintf("Page %u : %s\n", page, pagename);
bool r = ProcessPage(pix, page, pagename, retry_config, timeout_millisec, renderer);
pixDestroy(&pix);
if (!r) {
return false;
}
if (tessedit_page_number >= 0) {
break;
}
++page;
}
// Finish producing output
if (renderer && !renderer->EndDocument()) {
return false;
}
return true;
}
bool TessBaseAPI::ProcessPagesMultipageTiff(const l_uint8 *data, size_t size, const char *filename,
const char *retry_config, int timeout_millisec,
TessResultRenderer *renderer,
int tessedit_page_number) {
Pix *pix = nullptr;
int page = (tessedit_page_number >= 0) ? tessedit_page_number : 0;
size_t offset = 0;
for (;; ++page) {
if (tessedit_page_number >= 0) {
page = tessedit_page_number;
pix = (data) ? pixReadMemTiff(data, size, page) : pixReadTiff(filename, page);
} else {
pix = (data) ? pixReadMemFromMultipageTiff(data, size, &offset)
: pixRead<|fim_middle|>// Add last line without terminating LF.
lines.push_back(line);
|
ut_file);
fclose(training_output_file);
#endif // ndef DISABLED_LEGACY_ENGINE
} else {
// Now run the main recognition.
bool wait_for_text = true;
GetBoolVariable("paragraph_text_based", &wait_for_text);
if (!wait_for_text) {
DetectParagraphs(false);
}
if (tesseract_->recog_all_words(page_res_, monitor, nullptr, nullptr, 0)) {
if (wait_for_text) {
DetectParagraphs(true);
}
} else {
result = -1;
}
}
return result;
}
// Takes ownership of the input pix.
void TessBaseAPI::SetInputImage(Pix *pix) {
tesseract_->set_pix_original(pix);
}
Pix *TessBaseAPI::GetInputImage() {
return tesseract_->pix_original();
}
const char *TessBaseAPI::GetInputName() {
if (!input_file_.empty()) {
return input_file_.c_str();
}
return nullptr;
}
const char *TessBaseAPI::GetDatapath() {
return tesseract_->datadir.c_str();
}
int TessBaseAPI::GetSourceYResolution() {
if (thresholder_ == nullptr)
return -1;
return thresholder_->GetSourceYResolution();
}
// If flist exists, get data from there. Otherwise get data from buf.
// Seems convoluted, but is the easiest way I know of to meet multiple
// goals. Support streaming from stdin, and also work on platforms
// lacking fmemopen.
// TODO: check different logic for flist/buf and simplify.
bool TessBaseAPI::ProcessPagesFileList(FILE *flist, std::string *buf, const char *retry_config,
int timeout_millisec, TessResultRenderer *renderer,
int tessedit_page_number) {
if (!flist && !buf) {
return false;
}
unsigned page = (tessedit_page_number >= 0) ? tessedit_page_number : 0;
char pagename[MAX_PATH];
std::vector<std::string> lines;
if (!flist) {
std::string line;
for (const auto ch : *buf) {
if (ch == '\n') {
lines.push_back(line);
line.clear();
} else {
line.push_back(ch);
}
}
if (!line.empty()) {
|
// Add last line without terminating LF.
lines.push_back(line);
|
}
if (lines.empty()) {
return false;
}
}
// Skip to the requested page number.
for (unsigned i = 0; i < page; i++) {
if (flist) {
if (fgets(pagename, sizeof(pagename), flist) == nullptr) {
break;
}
}
}
// Begin producing output
if (renderer && !renderer->BeginDocument(document_title.c_str())) {
return false;
}
// Loop over all pages - or just the requested one
while (true) {
if (flist) {
if (fgets(pagename, sizeof(pagename), flist) == nullptr) {
break;
}
} else {
if (page >= lines.size()) {
break;
}
snprintf(pagename, sizeof(pagename), "%s", lines[page].c_str());
}
chomp_string(pagename);
Pix *pix = pixRead(pagename);
if (pix == nullptr) {
tprintf("Image file %s cannot be read!\n", pagename);
return false;
}
tprintf("Page %u : %s\n", page, pagename);
bool r = ProcessPage(pix, page, pagename, retry_config, timeout_millisec, renderer);
pixDestroy(&pix);
if (!r) {
return false;
}
if (tessedit_page_number >= 0) {
break;
}
++page;
}
// Finish producing output
if (renderer && !renderer->EndDocument()) {
return false;
}
return true;
}
bool TessBaseAPI::ProcessPagesMultipageTiff(const l_uint8 *data, size_t size, const char *filename,
const char *retry_config, int timeout_millisec,
TessResultRenderer *renderer,
int tessedit_page_number) {
Pix *pix = nullptr;
int page = (tessedit_page_number >= 0) ? tessedit_page_number : 0;
size_t offset = 0;
for (;; ++page) {
if (tessedit_page_number >= 0) {
page = tessedit_page_number;
pix = (data) ? pixReadMemTiff(data, size, page) : pixReadTiff(filename, page);
} else {
pix = (data) ? pixReadMemFromMultipageTiff(data, size, &offset)
: pixRead
|
ast_based
|
<|fim_prefix|>e->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_REQUIRED: {
if (p_value) {
accesskit_node_set_required(ae->node);
} else {
accesskit_node_clear_required(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_VISITED: {
if (p_value) {
accesskit_node_set_visited(ae->node);
} else {
accesskit_node_clear_visited(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_BUSY: {
if (p_value) {
accesskit_node_set_busy(ae->node);
} else {
accesskit_node_clear_busy(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_MODAL: {
if (p_value) {
accesskit_node_set_modal(ae->node);
} else {
accesskit_node_clear_modal(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_TOUCH_PASSTHROUGH: {
if (p_value) {
accesskit_node_set_touch_transparent(ae->node);
} else {
accesskit_node_clear_touch_transparent(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_READONLY: {
if (p_value) {
accesskit_node_set_read_only(ae->node);
} else {
accesskit_node_clear_read_only(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_DISABLED: {
if (p_value) {
accesskit_node_set_disabled(ae->node);
} else {
accesskit_node_clear_disabled(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_CLIPS_CHILDREN: {
if (p_value) {
accesskit_node_set_clips_children(ae->node);
} else {
accesskit_node_clear_clips_children(ae->node);
}
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_classname(const RID &p_id, const String &p_classname) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_classname.is_empty()) {
<|fim_suffix|>
} else {
accesskit_node_clear_class_name(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_placeholder(const RID &p_id, const String &p_placeholder) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_placeholder.is_empty()) {
accesskit_node_set_placeholder(ae->node, p_placeholder.utf8().ptr());
} else {
accesskit_node_clear_placeholder(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_language(const RID &p_id, const String &p_language) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_language(ae->node, p_language.utf8().ptr());
}
void AccessibilityDriverAccessKit::accessibility_update_set_text_orientation(const RID &p_id, bool p_vertical) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (p_vertical) {
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_TOP_TO_BOTTOM);
} else {
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_orientation(const RID &p_id, bool p_vertical) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (p_vertical) {
accesski<|fim_middle|>accesskit_node_set_class_name(ae->node, p_classname.utf8().ptr());
|
e->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_REQUIRED: {
if (p_value) {
accesskit_node_set_required(ae->node);
} else {
accesskit_node_clear_required(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_VISITED: {
if (p_value) {
accesskit_node_set_visited(ae->node);
} else {
accesskit_node_clear_visited(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_BUSY: {
if (p_value) {
accesskit_node_set_busy(ae->node);
} else {
accesskit_node_clear_busy(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_MODAL: {
if (p_value) {
accesskit_node_set_modal(ae->node);
} else {
accesskit_node_clear_modal(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_TOUCH_PASSTHROUGH: {
if (p_value) {
accesskit_node_set_touch_transparent(ae->node);
} else {
accesskit_node_clear_touch_transparent(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_READONLY: {
if (p_value) {
accesskit_node_set_read_only(ae->node);
} else {
accesskit_node_clear_read_only(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_DISABLED: {
if (p_value) {
accesskit_node_set_disabled(ae->node);
} else {
accesskit_node_clear_disabled(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_CLIPS_CHILDREN: {
if (p_value) {
accesskit_node_set_clips_children(ae->node);
} else {
accesskit_node_clear_clips_children(ae->node);
}
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_classname(const RID &p_id, const String &p_classname) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_classname.is_empty()) {
|
accesskit_node_set_class_name(ae->node, p_classname.utf8().ptr());
|
} else {
accesskit_node_clear_class_name(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_placeholder(const RID &p_id, const String &p_placeholder) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_placeholder.is_empty()) {
accesskit_node_set_placeholder(ae->node, p_placeholder.utf8().ptr());
} else {
accesskit_node_clear_placeholder(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_language(const RID &p_id, const String &p_language) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_language(ae->node, p_language.utf8().ptr());
}
void AccessibilityDriverAccessKit::accessibility_update_set_text_orientation(const RID &p_id, bool p_vertical) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (p_vertical) {
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_TOP_TO_BOTTOM);
} else {
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_list_orientation(const RID &p_id, bool p_vertical) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (p_vertical) {
accesski
|
ast_based
|
<|fim_prefix|>_from_buft(ctx, buft);
if (!buf) {
LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
return false;
}
ggml_backend_buffer_clear(buf, 0);
bufs.emplace_back(buf);
}
return true;
}
bool llama_adapter_cvec::apply(
const llama_model & model,
const float * data,
size_t len,
int32_t n_embd,
int32_t il_start,
int32_t il_end) {
const auto & hparams = model.hparams;
if (data == nullptr) {
// disable the current control vector (but leave allocated for later)
layer_start = -1;
layer_end = -1;
return true;
}
if (n_embd != (int) hparams.n_embd) {
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
return false;
}
if (tensors.empty()) {
if (!init(model)) {
return false;
}
}
layer_start = il_start;
layer_end = il_end;
for (size_t il = 1; il < hparams.n_layer; il++) {
assert(tensors[il] != nullptr);
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
if (off + n_embd <= len) {
ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
}
}
return true;
}
// lora
llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) {
const std::string name(w->name);
const auto pos = ab_map.find(name);
if (pos != ab_map.end()) {
return &pos->second;
}
return nullptr;
}
static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
ggml_context * ctx_init;
gguf_init_params meta_gguf_params = {
/* .no_alloc = */ true,
/* .ctx = */ &ctx_init,
};
<|fim_suffix|>
if (!ctx_gguf) {
throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
}
ggml_context_ptr ctx { ctx_init };
// check metadata
{
const gguf_context * gguf_ctx = ctx_gguf.get();
LLAMA_LOG_INFO("%s: Dumping metadata keys/values.\n", __func__);
// get metadata as string
for (int i = 0; i < gguf_get_n_kv(gguf_ctx); i++) {
gguf_type type = gguf_get_kv_type(gguf_ctx, i);
const std::string type_name =
type == GGUF_TYPE_ARRAY
? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(gguf_ctx, i)), gguf_get_arr_n(gguf_ctx, i))
: gguf_type_name(type);
const char * name = gguf_get_key(gguf_ctx, i);
const std::string value = gguf_kv_to_str(gguf_ctx, i);
if (type != GGUF_TYPE_ARRAY) {
adapter.gguf_kv.emplace(name, value);
}
const size_t MAX_VALUE_LEN = 40;
std::string print_value = value.size() > MAX_VALUE_LEN ? format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str()) : value;
replace_all(print_value, "\n", "\\n");
LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), print_value.c_str());
}
auto get_kv_str = [&](const std::string & key) -> std::string {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? "" : std::string(gguf_get_val_str(gguf_ctx, id));
};
auto get_kv_f32 = [&](const std::string & key) -> float {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? 0.0f : gguf_get_val_f32(gguf_ctx, id);
};
LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
auto general_type = get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE));
if (general_type != "adapter") {
throw std::runtime_error("expect general.type to be 'a<|fim_middle|>gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
|
_from_buft(ctx, buft);
if (!buf) {
LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
return false;
}
ggml_backend_buffer_clear(buf, 0);
bufs.emplace_back(buf);
}
return true;
}
bool llama_adapter_cvec::apply(
const llama_model & model,
const float * data,
size_t len,
int32_t n_embd,
int32_t il_start,
int32_t il_end) {
const auto & hparams = model.hparams;
if (data == nullptr) {
// disable the current control vector (but leave allocated for later)
layer_start = -1;
layer_end = -1;
return true;
}
if (n_embd != (int) hparams.n_embd) {
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
return false;
}
if (tensors.empty()) {
if (!init(model)) {
return false;
}
}
layer_start = il_start;
layer_end = il_end;
for (size_t il = 1; il < hparams.n_layer; il++) {
assert(tensors[il] != nullptr);
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
if (off + n_embd <= len) {
ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
}
}
return true;
}
// lora
llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) {
const std::string name(w->name);
const auto pos = ab_map.find(name);
if (pos != ab_map.end()) {
return &pos->second;
}
return nullptr;
}
static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
ggml_context * ctx_init;
gguf_init_params meta_gguf_params = {
/* .no_alloc = */ true,
/* .ctx = */ &ctx_init,
};
|
gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
|
if (!ctx_gguf) {
throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
}
ggml_context_ptr ctx { ctx_init };
// check metadata
{
const gguf_context * gguf_ctx = ctx_gguf.get();
LLAMA_LOG_INFO("%s: Dumping metadata keys/values.\n", __func__);
// get metadata as string
for (int i = 0; i < gguf_get_n_kv(gguf_ctx); i++) {
gguf_type type = gguf_get_kv_type(gguf_ctx, i);
const std::string type_name =
type == GGUF_TYPE_ARRAY
? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(gguf_ctx, i)), gguf_get_arr_n(gguf_ctx, i))
: gguf_type_name(type);
const char * name = gguf_get_key(gguf_ctx, i);
const std::string value = gguf_kv_to_str(gguf_ctx, i);
if (type != GGUF_TYPE_ARRAY) {
adapter.gguf_kv.emplace(name, value);
}
const size_t MAX_VALUE_LEN = 40;
std::string print_value = value.size() > MAX_VALUE_LEN ? format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str()) : value;
replace_all(print_value, "\n", "\\n");
LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), print_value.c_str());
}
auto get_kv_str = [&](const std::string & key) -> std::string {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? "" : std::string(gguf_get_val_str(gguf_ctx, id));
};
auto get_kv_f32 = [&](const std::string & key) -> float {
int id = gguf_find_key(gguf_ctx, key.c_str());
return id < 0 ? 0.0f : gguf_get_val_f32(gguf_ctx, id);
};
LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
auto general_type = get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE));
if (general_type != "adapter") {
throw std::runtime_error("expect general.type to be 'a
|
ast_based
|
<|fim_prefix|>S->shaped_get_run_font_rid(p_shaped_text, i);
if (font_rid != RID()) {
CharString font_name = TS->font_get_name(font_rid).utf8();
if (font_name.length() > 0) {
accesskit_node_set_font_family(ae->node, font_name.ptr());
}
if (TS->font_get_style(font_rid).has_flag(TextServer::FONT_BOLD)) {
accesskit_node_set_bold(ae->node);
}
if (TS->font_get_style(font_rid).has_flag(TextServer::FONT_ITALIC)) {
accesskit_node_set_italic(ae->node);
}
accesskit_node_set_font_weight(ae->node, TS->font_get_weight(font_rid));
}
accesskit_node_set_font_size(ae->node, TS->shaped_get_run_font_size(p_shaped_text, i));
CharString language = TS->shaped_get_run_language(p_shaped_text, i).utf8();
if (language.length() > 0) {
accesskit_node_set_language(ae->node, language.ptr());
}
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT);
accesskit_rect rect;
rect.x0 = run_off_x;
rect.y0 = 0;
rect.x1 = run_off_x + size_x;
rect.y1 = text_height;
accesskit_node_set_bounds(ae->node, rect);
accesskit_node_add_action(ae->node, ACCESSKIT_ACTION_SCROLL_INTO_VIEW);
run_off_x += size_x;
}
{
// Add "\n" at the end.
AccessibilityElement *ae = memnew(AccessibilityElement);
ae->role = ACCESSKIT_ROLE_TEXT_RUN;
ae->window_id = parent_ae->window_id;
ae->parent = root_rid;
ae->run = Vector3i(full_range.y, full_range.y, run_count);
ae->node = accesskit_node_new(ae->role);
text_elements.push_back(ae);
Vector<uint8_t> char_lengths;
char_lengths.push_back(1);
accesskit_node_set_value(ae->node, "\n");
accesskit_node_set_character_lengths(ae->node, char_lengths.size(), char_lengths.ptr());
Vector<float> char_positions;
Vector<float> char_widths;
char_positions.push_back(0.0);
char_widths.push_back(1.0);
accesskit_node_set_character_positions(ae->node, char_positions.size(), char_positions.ptr());
accesskit_node_set_character_widths(ae->node, char_widths.size(), char_widths.ptr());
<|fim_suffix|>;
accesskit_rect rect;
rect.x0 = run_off_x;
rect.y0 = 0;
rect.x1 = run_off_x + 1;
rect.y1 = text_height;
accesskit_node_set_bounds(ae->node, rect);
}
// Sort runs in logical order.
struct RunCompare {
_FORCE_INLINE_ bool operator()(const AccessibilityElement *l, const AccessibilityElement *r) const {
return l->run.x < r->run.x;
}
};
text_elements.sort_custom<RunCompare>();
for (AccessibilityElement *text_element : text_elements) {
RID rid = rid_owner.make_rid(text_element);
root_ae->children.push_back(rid);
wd->update.insert(rid);
}
return root_rid;
}
bool AccessibilityDriverAccessKit::accessibility_has_element(const RID &p_id) const {
return rid_owner.owns(p_id);
}
void AccessibilityDriverAccessKit::_free_recursive(WindowData *p_wd, const RID &p_id) {
if (p_wd && p_wd->update.has(p_id)) {
p_wd->update.erase(p_id);
}
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
for (const RID &rid : ae->children) {
_free_recursive(p_wd, rid);
}
if (ae->node) {
accesskit_node_free(ae->node);
}
memdelete(ae);
rid_owner.free(p_id);
}
void AccessibilityDriverAccessKit::accessibility_free_element(const RID &p_id) {
ERR_FAIL_COND_MSG(in_accessibility_update, "Element can't be removed inside NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
if (ae) {
WindowData *wd = windows.getptr(ae->window_id);
AccessibilityElement *parent_ae = rid_owner.get_or_null(ae->parent);
if (parent_ae) {
parent_ae->children.erase(p_id);
}
_free_recursive(wd, p_id);
}
}
void AccessibilityDriverAccessKit::accessibility_element_set_meta(const RID &p_id, const Variant &p_meta) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
ae->meta = p_meta;
}
Variant AccessibilityDriverAccessKit::accessibility_ele<|fim_middle|>accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT)
|
S->shaped_get_run_font_rid(p_shaped_text, i);
if (font_rid != RID()) {
CharString font_name = TS->font_get_name(font_rid).utf8();
if (font_name.length() > 0) {
accesskit_node_set_font_family(ae->node, font_name.ptr());
}
if (TS->font_get_style(font_rid).has_flag(TextServer::FONT_BOLD)) {
accesskit_node_set_bold(ae->node);
}
if (TS->font_get_style(font_rid).has_flag(TextServer::FONT_ITALIC)) {
accesskit_node_set_italic(ae->node);
}
accesskit_node_set_font_weight(ae->node, TS->font_get_weight(font_rid));
}
accesskit_node_set_font_size(ae->node, TS->shaped_get_run_font_size(p_shaped_text, i));
CharString language = TS->shaped_get_run_language(p_shaped_text, i).utf8();
if (language.length() > 0) {
accesskit_node_set_language(ae->node, language.ptr());
}
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT);
accesskit_rect rect;
rect.x0 = run_off_x;
rect.y0 = 0;
rect.x1 = run_off_x + size_x;
rect.y1 = text_height;
accesskit_node_set_bounds(ae->node, rect);
accesskit_node_add_action(ae->node, ACCESSKIT_ACTION_SCROLL_INTO_VIEW);
run_off_x += size_x;
}
{
// Add "\n" at the end.
AccessibilityElement *ae = memnew(AccessibilityElement);
ae->role = ACCESSKIT_ROLE_TEXT_RUN;
ae->window_id = parent_ae->window_id;
ae->parent = root_rid;
ae->run = Vector3i(full_range.y, full_range.y, run_count);
ae->node = accesskit_node_new(ae->role);
text_elements.push_back(ae);
Vector<uint8_t> char_lengths;
char_lengths.push_back(1);
accesskit_node_set_value(ae->node, "\n");
accesskit_node_set_character_lengths(ae->node, char_lengths.size(), char_lengths.ptr());
Vector<float> char_positions;
Vector<float> char_widths;
char_positions.push_back(0.0);
char_widths.push_back(1.0);
accesskit_node_set_character_positions(ae->node, char_positions.size(), char_positions.ptr());
accesskit_node_set_character_widths(ae->node, char_widths.size(), char_widths.ptr());
|
accesskit_node_set_text_direction(ae->node, ACCESSKIT_TEXT_DIRECTION_LEFT_TO_RIGHT)
|
;
accesskit_rect rect;
rect.x0 = run_off_x;
rect.y0 = 0;
rect.x1 = run_off_x + 1;
rect.y1 = text_height;
accesskit_node_set_bounds(ae->node, rect);
}
// Sort runs in logical order.
struct RunCompare {
_FORCE_INLINE_ bool operator()(const AccessibilityElement *l, const AccessibilityElement *r) const {
return l->run.x < r->run.x;
}
};
text_elements.sort_custom<RunCompare>();
for (AccessibilityElement *text_element : text_elements) {
RID rid = rid_owner.make_rid(text_element);
root_ae->children.push_back(rid);
wd->update.insert(rid);
}
return root_rid;
}
bool AccessibilityDriverAccessKit::accessibility_has_element(const RID &p_id) const {
return rid_owner.owns(p_id);
}
void AccessibilityDriverAccessKit::_free_recursive(WindowData *p_wd, const RID &p_id) {
if (p_wd && p_wd->update.has(p_id)) {
p_wd->update.erase(p_id);
}
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
for (const RID &rid : ae->children) {
_free_recursive(p_wd, rid);
}
if (ae->node) {
accesskit_node_free(ae->node);
}
memdelete(ae);
rid_owner.free(p_id);
}
void AccessibilityDriverAccessKit::accessibility_free_element(const RID &p_id) {
ERR_FAIL_COND_MSG(in_accessibility_update, "Element can't be removed inside NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
if (ae) {
WindowData *wd = windows.getptr(ae->window_id);
AccessibilityElement *parent_ae = rid_owner.get_or_null(ae->parent);
if (parent_ae) {
parent_ae->children.erase(p_id);
}
_free_recursive(wd, p_id);
}
}
void AccessibilityDriverAccessKit::accessibility_element_set_meta(const RID &p_id, const Variant &p_meta) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
ae->meta = p_meta;
}
Variant AccessibilityDriverAccessKit::accessibility_ele
|
ast_based
|
<|fim_prefix|>const std::string &path) {
size_t pos = path.find_last_of("/\\");
if (pos == std::string::npos) {
return path;
}
return path.substr(pos + 1);
}
int main(int argc, char ** argv) {
common_init();
struct train_params params = get_default_train_params();
if (!params_parse(argc, argv, ¶ms)) {
return 1;
}
Config config;
TransformerWeights weights = {};
{
LOG_INF("%s: Loading llama2c model from %s\n", __func__, params.fn_llama2c_model);
FILE * file = fopen(params.fn_llama2c_model, "rb");
if (!file) {
LOG_ERR("%s: Unable to open the checkpoint file %s!\n", __func__, params.fn_llama2c_model);
return 1;
}
// read in the config header
if (fread(&config, sizeof(Config), 1, file) != 1) {
LOG_ERR("%s: Unable to read llama2c config from %s!\n",__func__,params.fn_llama2c_model);
return 1;
}
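        // A negative vocab_size in the llama2.c checkpoint header signals that the output (classifier) weights are not shared with the token embedding.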
auto shared_weights = config.vocab_size > 0;
config.vocab_size = abs(config.vocab_size);
// read in the Transformer weights
alloc_weights(&weights, &config, shared_weights);
if (checkpoint_init_weights(&weights, &config, file, shared_weights)) {
LOG_ERR("%s: Unable to initialize transformer weights from %s!",__func__,params.fn_llama2c_model);
return 1;
}
fclose(file);
}
struct my_llama_vocab vocab;
load_vocab(params.fn_vocab_model, &config, &vocab);
struct my_llama_model model;
model.hparams.n_vocab = config.vocab_size; //llama_vocab_n_vocab(lctx);
model.hparams.n_ctx = params.n_ctx;
model.hparams.n_embd = config.dim; //params.n_embd;
model.hparams.n_ff = config.hidden_dim;
model.hparams.n_mult = 32;//params.n_mult;
model.hparams.n_head = config.n_heads; //params.n_head;
model.hparams.n_head_kv = config.n_kv_heads;
model.hparams.n_layer = config.n_layers; //params.n_layer;
<|fim_suffix|>;
print_params(&model.hparams);
struct ggml_init_params lcparams;
lcparams.mem_size = 1024ll*1024ll*1024ll*((size_t) params.mem_model_gb);
lcparams.mem_buffer = NULL;
lcparams.no_alloc = false;
model.ctx = ggml_init(lcparams);
init_model(&model);
model.name = basename(params.fn_llama2c_model);
save_as_llama_model(&vocab, &model, &weights, params.fn_llama2c_output_model);
LOG_INF("%s: Saving llama.c model file %s in ggml format at %s\n", __func__, params.fn_llama2c_model, params.fn_llama2c_output_model);
ggml_free(model.ctx);
return 0;
}
<|fim_middle|>model.hparams.n_rot = std::min((uint32_t)params.n_rotmax, model.hparams.n_embd / model.hparams.n_head)
|
const std::string &path) {
size_t pos = path.find_last_of("/\\");
if (pos == std::string::npos) {
return path;
}
return path.substr(pos + 1);
}
int main(int argc, char ** argv) {
common_init();
struct train_params params = get_default_train_params();
if (!params_parse(argc, argv, ¶ms)) {
return 1;
}
Config config;
TransformerWeights weights = {};
{
LOG_INF("%s: Loading llama2c model from %s\n", __func__, params.fn_llama2c_model);
FILE * file = fopen(params.fn_llama2c_model, "rb");
if (!file) {
LOG_ERR("%s: Unable to open the checkpoint file %s!\n", __func__, params.fn_llama2c_model);
return 1;
}
// read in the config header
if (fread(&config, sizeof(Config), 1, file) != 1) {
LOG_ERR("%s: Unable to read llama2c config from %s!\n",__func__,params.fn_llama2c_model);
return 1;
}
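        // A negative vocab_size in the llama2.c checkpoint header signals that the output (classifier) weights are not shared with the token embedding.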
auto shared_weights = config.vocab_size > 0;
config.vocab_size = abs(config.vocab_size);
// read in the Transformer weights
alloc_weights(&weights, &config, shared_weights);
if (checkpoint_init_weights(&weights, &config, file, shared_weights)) {
LOG_ERR("%s: Unable to initialize transformer weights from %s!",__func__,params.fn_llama2c_model);
return 1;
}
fclose(file);
}
struct my_llama_vocab vocab;
load_vocab(params.fn_vocab_model, &config, &vocab);
struct my_llama_model model;
model.hparams.n_vocab = config.vocab_size; //llama_vocab_n_vocab(lctx);
model.hparams.n_ctx = params.n_ctx;
model.hparams.n_embd = config.dim; //params.n_embd;
model.hparams.n_ff = config.hidden_dim;
model.hparams.n_mult = 32;//params.n_mult;
model.hparams.n_head = config.n_heads; //params.n_head;
model.hparams.n_head_kv = config.n_kv_heads;
model.hparams.n_layer = config.n_layers; //params.n_layer;
|
model.hparams.n_rot = std::min((uint32_t)params.n_rotmax, model.hparams.n_embd / model.hparams.n_head)
|
;
print_params(&model.hparams);
struct ggml_init_params lcparams;
lcparams.mem_size = 1024ll*1024ll*1024ll*((size_t) params.mem_model_gb);
lcparams.mem_buffer = NULL;
lcparams.no_alloc = false;
model.ctx = ggml_init(lcparams);
init_model(&model);
model.name = basename(params.fn_llama2c_model);
save_as_llama_model(&vocab, &model, &weights, params.fn_llama2c_output_model);
LOG_INF("%s: Saving llama.c model file %s in ggml format at %s\n", __func__, params.fn_llama2c_model, params.fn_llama2c_output_model);
ggml_free(model.ctx);
return 0;
}
|
ast_based
|
<|fim_prefix|>ow std::runtime_error("invalid gguf type for " + key);
}
const auto arr_type = gguf_get_arr_type(ctx_gguf.get(), kid);
if (arr_type != GGUF_TYPE_UINT32) {
throw std::runtime_error("invalid gguf element type for " + key);
}
const size_t seq_len = gguf_get_arr_n(ctx_gguf.get(), kid);
const void * data = gguf_get_arr_data(ctx_gguf.get(), kid);
adapter.alora_invocation_tokens.resize(seq_len);
std::copy(
(const llama_token *)data,
(const llama_token *)data + seq_len,
adapter.alora_invocation_tokens.begin());
}
}
int n_tensors = gguf_get_n_tensors(ctx_gguf.get());
// contexts for each buffer type
std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
auto it = ctx_map.find(buft);
if (it == ctx_map.end()) {
// add a new context
ggml_init_params params = {
/*.mem_size =*/ n_tensors*ggml_tensor_overhead(),
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
};
ggml_context * buft_ctx = ggml_init(params);
if (!buft_ctx) {
return nullptr;
}
ctx_map[buft] = buft_ctx;
adapter.ctxs.emplace_back(buft_ctx);
return buft_ctx;
        }
return it->second;
};
// bundle lora_a and lora_b into pairs
std::map<std::string, llama_adapter_lora_weight> ab_map;
auto str_endswith = [](const std::string & str, const std::string & suffix) {
return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
};
for (ggml_tensor * cur = ggml_get_first_tensor(ctx.get()); cur; cur = ggml_get_next_tensor(ctx.get(), cur)) {
std::string name(cur->name);
if (<|fim_suffix|>) {
replace_all(name, ".lora_a", "");
if (ab_map.find(name) == ab_map.end()) {
ab_map[name] = llama_adapter_lora_weight(cur, nullptr);
} else {
ab_map[name].a = cur;
}
} else if (str_endswith(name, ".lora_b")) {
replace_all(name, ".lora_b", "");
if (ab_map.find(name) == ab_map.end()) {
ab_map[name] = llama_adapter_lora_weight(nullptr, cur);
} else {
ab_map[name].b = cur;
}
} else if (str_endswith(name, "_norm.weight")) {
// TODO: add support for norm vector
// for now, we don't really care because most adapters still work fine without it
continue;
} else {
throw std::runtime_error("LoRA tensor '" + name + "' has unexpected suffix");
}
}
// get extra buffer types of the CPU
    // TODO: a more general solution for non-CPU extra buft should be implemented in the future
// ref: https://github.com/ggml-org/llama.cpp/pull/12593#pullrequestreview-2718659948
std::vector<ggml_backend_buffer_type_t> buft_extra;
{
auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
if (!cpu_dev) {
throw std::runtime_error(format("%s: no CPU backend found", __func__));
}
auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts");
if (ggml_backend_dev_get_extra_bufts_fn) {
ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev);
while (extra_bufts && *extra_bufts) {
buft_extra.emplace_back(*extra_bufts);
++extra_bufts;
}
}
}
// add tensors
for (auto & it : ab_map) {
<|fim_middle|>str_endswith(name, ".lora_a")
|
ow std::runtime_error("invalid gguf type for " + key);
}
const auto arr_type = gguf_get_arr_type(ctx_gguf.get(), kid);
if (arr_type != GGUF_TYPE_UINT32) {
throw std::runtime_error("invalid gguf element type for " + key);
}
const size_t seq_len = gguf_get_arr_n(ctx_gguf.get(), kid);
const void * data = gguf_get_arr_data(ctx_gguf.get(), kid);
adapter.alora_invocation_tokens.resize(seq_len);
std::copy(
(const llama_token *)data,
(const llama_token *)data + seq_len,
adapter.alora_invocation_tokens.begin());
}
}
int n_tensors = gguf_get_n_tensors(ctx_gguf.get());
// contexts for each buffer type
std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
auto it = ctx_map.find(buft);
if (it == ctx_map.end()) {
// add a new context
ggml_init_params params = {
/*.mem_size =*/ n_tensors*ggml_tensor_overhead(),
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
};
ggml_context * buft_ctx = ggml_init(params);
if (!buft_ctx) {
return nullptr;
}
ctx_map[buft] = buft_ctx;
adapter.ctxs.emplace_back(buft_ctx);
return buft_ctx;
            }
return it->second;
};
// bundle lora_a and lora_b into pairs
std::map<std::string, llama_adapter_lora_weight> ab_map;
auto str_endswith = [](const std::string & str, const std::string & suffix) {
return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
};
for (ggml_tensor * cur = ggml_get_first_tensor(ctx.get()); cur; cur = ggml_get_next_tensor(ctx.get(), cur)) {
std::string name(cur->name);
if (
|
str_endswith(name, ".lora_a")
|
) {
replace_all(name, ".lora_a", "");
if (ab_map.find(name) == ab_map.end()) {
ab_map[name] = llama_adapter_lora_weight(cur, nullptr);
} else {
ab_map[name].a = cur;
}
} else if (str_endswith(name, ".lora_b")) {
replace_all(name, ".lora_b", "");
if (ab_map.find(name) == ab_map.end()) {
ab_map[name] = llama_adapter_lora_weight(nullptr, cur);
} else {
ab_map[name].b = cur;
}
} else if (str_endswith(name, "_norm.weight")) {
// TODO: add support for norm vector
// for now, we don't really care because most adapters still work fine without it
continue;
} else {
throw std::runtime_error("LoRA tensor '" + name + "' has unexpected suffix");
}
}
// get extra buffer types of the CPU
    // TODO: a more general solution for non-CPU extra buft should be implemented in the future
// ref: https://github.com/ggml-org/llama.cpp/pull/12593#pullrequestreview-2718659948
std::vector<ggml_backend_buffer_type_t> buft_extra;
{
auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
if (!cpu_dev) {
throw std::runtime_error(format("%s: no CPU backend found", __func__));
}
auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts");
if (ggml_backend_dev_get_extra_bufts_fn) {
ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev);
while (extra_bufts && *extra_bufts) {
buft_extra.emplace_back(*extra_bufts);
++extra_bufts;
}
}
}
// add tensors
for (auto & it : ab_map) {
|
ast_based
|
<|fim_prefix|>pc/grpc.h>
#include <grpc/impl/connectivity_state.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/time.h>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/completion_queue.h>
#include <grpcpp/impl/call.h>
#include <grpcpp/impl/call_op_set_interface.h>
#include <grpcpp/impl/completion_queue_tag.h>
#include <grpcpp/impl/rpc_method.h>
#include <grpcpp/impl/sync.h>
#include <grpcpp/support/client_interceptor.h>
#include <grpcpp/support/slice.h>
#include <atomic>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/util/grpc_check.h"
namespace grpc {
Channel::Channel(
const std::string& host, grpc_channel* channel,
std::vector<
std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
interceptor_creators)
: host_(host), c_channel_(channel) {
interceptor_creators_ = std::move(interceptor_creators);
}
Channel::~Channel() {
grpc_channel_destroy(c_channel_);
CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_relaxed);
if (callback_cq != nullptr) {
if (grpc_iomgr_run_in_background()) {
// gRPC-core provides the backing needed for the preferred CQ type
callback_cq->Shutdown();
} else {
CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq);
}
}
}
namespace {
inline grpc_slice SliceFromArray(const char* arr, size_t len) {
return grpc_slice_from_copied_buffer(arr, len);
}
std::string GetChannelInfoField(grpc_channel* channel,
grpc_channel_info* channel_info,
char*** channel_info_field) {
char* value = nullptr;
memset(channel_info, 0, sizeof(*channel_info));
*channel_info_field = &value;
grpc_channel_get_info(channel, channel_info);
if (value == nullptr) return "";
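  // The core allocates the returned info string; copy it into a std::string and release it with gpr_free.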
std::string result = value;
<|fim_suffix|>;
return result;
}
} // namespace
std::string Channel::GetLoadBalancingPolicyName() const {
grpc_channel_info channel_info;
return GetChannelInfoField(c_channel_, &channel_info,
&channel_info.lb_policy_name);
}
std::string Channel::GetServiceConfigJSON() const {
grpc_channel_info channel_info;
return GetChannelInfoField(c_channel_, &channel_info,
&channel_info.service_config_json);
}
namespace experimental {
void ChannelResetConnectionBackoff(Channel* channel) {
grpc_channel_reset_connect_backoff(channel->c_channel_);
}
int64_t ChannelGetChannelzUuid(Channel* channel) {
auto* node = grpc_channel_get_channelz_node(channel->c_channel_);
if (node == nullptr) return 0;
return node->uuid();
}
} // namespace experimental
grpc::internal::Call Channel::CreateCallInternal(
const grpc::internal::RpcMethod& method, grpc::ClientContext* context,
grpc::CompletionQueue* cq, size_t interceptor_pos) {
const bool kRegistered = method.channel_tag() && context->authority().empty();
grpc_call* c_call = nullptr;
if (kRegistered) {
c_call = grpc_channel_create_registered_call(
c_channel_, context->propagate_from_call_,
context->propagation_options_.c_bitmask(), cq->cq(),
method.channel_tag(), context->raw_deadline(), nullptr);
} else {
const ::std::string* host_str = nullptr;
if (!context->authority_.empty()) {
host_str = &context->authority_;
} else if (!host_.empty()) {
host_str = &host_;
}
grpc_slice method_slice =
SliceFromArray(method.name(), strlen(method.name()));
grpc_slice host_slice;
if (host_str != nullptr) {
host_slice = grpc::SliceFromCopiedString(*host_str);
}
c_call = grpc_channel_create_call(
c_channel_, context->propagate_from_call_,
context->propagation_options_.c_bitmask(), cq->cq(), method_slice,
host_str == nullptr ? nullptr : &host_slice, context->raw_de<|fim_middle|>gpr_free(value)
|
pc/grpc.h>
#include <grpc/impl/connectivity_state.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/time.h>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/completion_queue.h>
#include <grpcpp/impl/call.h>
#include <grpcpp/impl/call_op_set_interface.h>
#include <grpcpp/impl/completion_queue_tag.h>
#include <grpcpp/impl/rpc_method.h>
#include <grpcpp/impl/sync.h>
#include <grpcpp/support/client_interceptor.h>
#include <grpcpp/support/slice.h>
#include <atomic>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/util/grpc_check.h"
namespace grpc {
Channel::Channel(
const std::string& host, grpc_channel* channel,
std::vector<
std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
interceptor_creators)
: host_(host), c_channel_(channel) {
interceptor_creators_ = std::move(interceptor_creators);
}
Channel::~Channel() {
grpc_channel_destroy(c_channel_);
CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_relaxed);
if (callback_cq != nullptr) {
if (grpc_iomgr_run_in_background()) {
// gRPC-core provides the backing needed for the preferred CQ type
callback_cq->Shutdown();
} else {
CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq);
}
}
}
namespace {
inline grpc_slice SliceFromArray(const char* arr, size_t len) {
return grpc_slice_from_copied_buffer(arr, len);
}
std::string GetChannelInfoField(grpc_channel* channel,
grpc_channel_info* channel_info,
char*** channel_info_field) {
char* value = nullptr;
memset(channel_info, 0, sizeof(*channel_info));
*channel_info_field = &value;
grpc_channel_get_info(channel, channel_info);
if (value == nullptr) return "";
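  // The core allocates the returned info string; copy it into a std::string and release it with gpr_free.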
std::string result = value;
|
gpr_free(value)
|
;
return result;
}
} // namespace
std::string Channel::GetLoadBalancingPolicyName() const {
grpc_channel_info channel_info;
return GetChannelInfoField(c_channel_, &channel_info,
&channel_info.lb_policy_name);
}
std::string Channel::GetServiceConfigJSON() const {
grpc_channel_info channel_info;
return GetChannelInfoField(c_channel_, &channel_info,
&channel_info.service_config_json);
}
namespace experimental {
void ChannelResetConnectionBackoff(Channel* channel) {
grpc_channel_reset_connect_backoff(channel->c_channel_);
}
int64_t ChannelGetChannelzUuid(Channel* channel) {
auto* node = grpc_channel_get_channelz_node(channel->c_channel_);
if (node == nullptr) return 0;
return node->uuid();
}
} // namespace experimental
grpc::internal::Call Channel::CreateCallInternal(
const grpc::internal::RpcMethod& method, grpc::ClientContext* context,
grpc::CompletionQueue* cq, size_t interceptor_pos) {
const bool kRegistered = method.channel_tag() && context->authority().empty();
grpc_call* c_call = nullptr;
if (kRegistered) {
c_call = grpc_channel_create_registered_call(
c_channel_, context->propagate_from_call_,
context->propagation_options_.c_bitmask(), cq->cq(),
method.channel_tag(), context->raw_deadline(), nullptr);
} else {
const ::std::string* host_str = nullptr;
if (!context->authority_.empty()) {
host_str = &context->authority_;
} else if (!host_.empty()) {
host_str = &host_;
}
grpc_slice method_slice =
SliceFromArray(method.name(), strlen(method.name()));
grpc_slice host_slice;
if (host_str != nullptr) {
host_slice = grpc::SliceFromCopiedString(*host_str);
}
c_call = grpc_channel_create_call(
c_channel_, context->propagate_from_call_,
context->propagation_options_.c_bitmask(), cq->cq(), method_slice,
host_str == nullptr ? nullptr : &host_slice, context->raw_de
|
ast_based
|
<|fim_prefix|>// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#ifndef CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
#define CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
#include <type_traits>
#include "common/check.h"
#include "common/set.h"
#include "llvm/Support/Allocator.h"
#include "toolchain/base/mem_usage.h"
#include "toolchain/base/value_store.h"
#include "toolchain/base/yaml.h"
namespace Carbon::SemIR {
// Provides a block-based ValueStore, which uses slab allocation of added
// blocks. This allows references to values to outlast vector resizes that might
// otherwise invalidate references.
//
// BlockValueStore is used as-is, but there are also children that expose the
// protected members for type-specific functionality.
template <typename IdT, typename ElementT>
class BlockValueStore : public Yaml::Printable<BlockValueStore<IdT, ElementT>> {
public:
using IdType = IdT;
using ElementType = ElementT;
using RefType = llvm::MutableArrayRef<ElementT>;
using ConstRefType = llvm::ArrayRef<ElementT>;
explicit BlockValueStore(llvm::BumpPtrAllocator& allocator)
: allocator_(&allocator) <|fim_suffix|>
CARBON_CHECK(empty_val.key() == IdT::Empty);
}
// Adds a block with the given content, returning an ID to reference it.
auto Add(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
return values_.Add(AllocateCopy(content));
}
// Returns the requested block.
auto Get(IdT id) const -> ConstRefType { return values_.Get(id); }
// Returns a mutable view of the requested block. This operation should be
// avoided where possible; we generally want blocks to be immutable once
// created.
auto GetMutable(IdT id) -> RefType { return values_.Get(id); }
// Returns a new block formed by applying `transform(elem_id)` to each element
// in the specified block.
template <typename TransformFnT>
auto Transform(IdT id, TransformFnT transform) -> IdT {
llvm::SmallVector<ElementType> block(llvm::map_range(Get(id), transform));
return Add(block);
}
// Adds a block or finds an existing canonical block with the given content,
// and returns an ID to reference it.
auto AddCanonical(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
auto result = canonical_blocks_.Insert(
content, [&] { return Add(content); }, KeyContext(this));
return result.key();
}
// Promotes an existing block ID to a canonical block ID, or returns an
// existing canonical block ID if the block was already added. The specified
// block must not be modified after this point.
auto MakeCanonical(IdT id) -> IdT {
// Get the content first so that we don't have unnecessary translation of
// the `id` into the content during insertion.
auto result = canonical_blocks_.Insert(
Get(id), [id] { return id; }, KeyContext(this));
return result.key();
}
auto OutputYaml() const -> Yaml::OutputMapping {
return Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
for (auto [block_id, block] : values_.enumerate()) {
map.Add(PrintToStr<|fim_middle|>{
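    // Insert the empty block first so that it canonicalizes to IdT::Empty, as verified by the check below.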
auto empty = RefType();
auto empty_val = canonical_blocks_.Insert(
empty, [&] { return values_.Add(empty); }, KeyContext(this));
|
// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#ifndef CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
#define CARBON_TOOLCHAIN_BASE_BLOCK_VALUE_STORE_H_
#include <type_traits>
#include "common/check.h"
#include "common/set.h"
#include "llvm/Support/Allocator.h"
#include "toolchain/base/mem_usage.h"
#include "toolchain/base/value_store.h"
#include "toolchain/base/yaml.h"
namespace Carbon::SemIR {
// Provides a block-based ValueStore, which uses slab allocation of added
// blocks. This allows references to values to outlast vector resizes that might
// otherwise invalidate references.
//
// BlockValueStore is used as-is, but there are also children that expose the
// protected members for type-specific functionality.
template <typename IdT, typename ElementT>
class BlockValueStore : public Yaml::Printable<BlockValueStore<IdT, ElementT>> {
public:
using IdType = IdT;
using ElementType = ElementT;
using RefType = llvm::MutableArrayRef<ElementT>;
using ConstRefType = llvm::ArrayRef<ElementT>;
explicit BlockValueStore(llvm::BumpPtrAllocator& allocator)
: allocator_(&allocator)
|
{
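    // Insert the empty block first so that it canonicalizes to IdT::Empty, as verified by the check below.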
auto empty = RefType();
auto empty_val = canonical_blocks_.Insert(
empty, [&] { return values_.Add(empty); }, KeyContext(this));
|
CARBON_CHECK(empty_val.key() == IdT::Empty);
}
// Adds a block with the given content, returning an ID to reference it.
auto Add(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
return values_.Add(AllocateCopy(content));
}
// Returns the requested block.
auto Get(IdT id) const -> ConstRefType { return values_.Get(id); }
// Returns a mutable view of the requested block. This operation should be
// avoided where possible; we generally want blocks to be immutable once
// created.
auto GetMutable(IdT id) -> RefType { return values_.Get(id); }
// Returns a new block formed by applying `transform(elem_id)` to each element
// in the specified block.
template <typename TransformFnT>
auto Transform(IdT id, TransformFnT transform) -> IdT {
llvm::SmallVector<ElementType> block(llvm::map_range(Get(id), transform));
return Add(block);
}
// Adds a block or finds an existing canonical block with the given content,
// and returns an ID to reference it.
auto AddCanonical(ConstRefType content) -> IdT {
if (content.empty()) {
return IdT::Empty;
}
auto result = canonical_blocks_.Insert(
content, [&] { return Add(content); }, KeyContext(this));
return result.key();
}
// Promotes an existing block ID to a canonical block ID, or returns an
// existing canonical block ID if the block was already added. The specified
// block must not be modified after this point.
auto MakeCanonical(IdT id) -> IdT {
// Get the content first so that we don't have unnecessary translation of
// the `id` into the content during insertion.
auto result = canonical_blocks_.Insert(
Get(id), [id] { return id; }, KeyContext(this));
return result.key();
}
auto OutputYaml() const -> Yaml::OutputMapping {
return Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
for (auto [block_id, block] : values_.enumerate()) {
map.Add(PrintToStr
|
ast_based
|
<|fim_prefix|>(moving_selection_offset.x) > CMP_EPSILON || (snapped_time > moving_selection_pivot && time_delta > CMP_EPSILON) || (snapped_time < moving_selection_pivot && time_delta < -CMP_EPSILON)) {
time_offset = snapped_time - moving_selection_pivot;
}
float moving_selection_begin_value;
if (moving_selection_from_key == -1) {
moving_selection_begin_value = (get_size().height / 2.0 - moving_selection_mouse_begin.y) * timeline_v_zoom + timeline_v_scroll;
} else {
moving_selection_begin_value = animation->bezier_track_get_key_value(moving_selection_from_track, moving_selection_from_key);
}
float y_offset = y - moving_selection_begin_value;
moving_selection_offset = Vector2(time_offset, y_offset);
}
additional_moving_handle_lefts.clear();
additional_moving_handle_rights.clear();
queue_redraw();
}
if (box_selecting_attempt && mm.is_valid()) {
if (!box_selecting) {
box_selecting = true;
box_selecting_add = mm->is_shift_pressed();
}
box_selection_to = mm->get_position();
queue_redraw();
}
if (scaling_selection && mm.is_valid() && !read_only) {
Point2 mp = mm->get_position();
const int handle_length = Math::round((selection_handles_rect.size.width - selection_rect.size.width) / 4.0);
Point2 rel_pos;
	// Calculate the scale according to the distance between the mouse's position (adjusted so that the cursor appears inside the handles)
// and the opposite end of the `selection_rect`.
if (scaling_selection_handles.x != 0) {
if (scaling_selection_handles.x == 1) { // Right Handle
const int handle_adjust = Math::round(mp.x - (scaling_selection_scale.x >= 0 ? selection_rect.position.x : (selection_rect.position.x + selection_rect.size.width)));
mp.x -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.x * SIGN(handle_adjust);
if (editor->is_snap_keys_enabled()) {
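					// Convert the handle position to timeline time, snap it, then map the snapped value back to pixel coordinates.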
mp.x = editor->snap_time((mp.x - limit) / timeline->get_zoom_scale(), true) + timeline->get_value();
<|fim_suffix|>
}
rel_pos.x = scaling_selection_scale.x >= 0 ? (mp.x - selection_rect.position.x) : selection_rect.position.x + selection_rect.size.width - mp.x;
} else { // Left Handle
const int handle_adjust = Math::round((scaling_selection_scale.x >= 0 ? (selection_rect.position.x + selection_rect.size.width) : selection_rect.position.x) - mp.x);
mp.x -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.x * SIGN(handle_adjust);
const float x = editor->snap_time((mp.x - limit) / timeline->get_zoom_scale(), true) + timeline->get_value();
if (editor->is_snap_keys_enabled()) {
mp.x = (x - timeline->get_value()) * timeline->get_zoom_scale() + limit;
}
rel_pos.x = scaling_selection_scale.x >= 0 ? (selection_rect.position.x + selection_rect.size.width - mp.x) : (mp.x - selection_rect.position.x);
scaling_selection_offset.x = scaling_selection_pivot.x - x;
}
scaling_selection_scale.x *= rel_pos.x / selection_rect.size.width;
if (scaling_selection_scale.x == 0) {
scaling_selection_scale.x = CMP_EPSILON;
}
}
if (scaling_selection_handles.y != 0) {
if (scaling_selection_handles.y == 1) { // Bottom Handle
const int handle_adjust = Math::round(mp.y - (scaling_selection_scale.y >= 0 ? selection_rect.position.y : (selection_rect.position.y + selection_rect.size.height)));
mp.y -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.y * SIGN(handle_adjust);
if (scaling_selection_scale.y >= 0) {
rel_pos.y = mp.y - selection_rect.position.y;
} else {
rel_pos.y = selection_rect.position.y + selection_rect.size.height - mp.y;
}
} else { // Top Handle
const int handle_adjust = Math::round((scaling_selection_scale.y >= 0 ? (selection_rect.position.y + selection_rect.size.height) : selection_rect.position.y) - mp.y);
mp.y -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.y * SIGN(handle_adjust);
if (scaling_selectio<|fim_middle|>mp.x = (mp.x - timeline->get_value()) * timeline->get_zoom_scale() + limit;
|
(moving_selection_offset.x) > CMP_EPSILON || (snapped_time > moving_selection_pivot && time_delta > CMP_EPSILON) || (snapped_time < moving_selection_pivot && time_delta < -CMP_EPSILON)) {
time_offset = snapped_time - moving_selection_pivot;
}
float moving_selection_begin_value;
if (moving_selection_from_key == -1) {
moving_selection_begin_value = (get_size().height / 2.0 - moving_selection_mouse_begin.y) * timeline_v_zoom + timeline_v_scroll;
} else {
moving_selection_begin_value = animation->bezier_track_get_key_value(moving_selection_from_track, moving_selection_from_key);
}
float y_offset = y - moving_selection_begin_value;
moving_selection_offset = Vector2(time_offset, y_offset);
}
additional_moving_handle_lefts.clear();
additional_moving_handle_rights.clear();
queue_redraw();
}
if (box_selecting_attempt && mm.is_valid()) {
if (!box_selecting) {
box_selecting = true;
box_selecting_add = mm->is_shift_pressed();
}
box_selection_to = mm->get_position();
queue_redraw();
}
if (scaling_selection && mm.is_valid() && !read_only) {
Point2 mp = mm->get_position();
const int handle_length = Math::round((selection_handles_rect.size.width - selection_rect.size.width) / 4.0);
Point2 rel_pos;
	// Calculate the scale according to the distance between the mouse's position (adjusted so that the cursor appears inside the handles)
// and the opposite end of the `selection_rect`.
if (scaling_selection_handles.x != 0) {
if (scaling_selection_handles.x == 1) { // Right Handle
const int handle_adjust = Math::round(mp.x - (scaling_selection_scale.x >= 0 ? selection_rect.position.x : (selection_rect.position.x + selection_rect.size.width)));
mp.x -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.x * SIGN(handle_adjust);
if (editor->is_snap_keys_enabled()) {
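					// Convert the handle position to timeline time, snap it, then map the snapped value back to pixel coordinates.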
mp.x = editor->snap_time((mp.x - limit) / timeline->get_zoom_scale(), true) + timeline->get_value();
|
mp.x = (mp.x - timeline->get_value()) * timeline->get_zoom_scale() + limit;
|
}
rel_pos.x = scaling_selection_scale.x >= 0 ? (mp.x - selection_rect.position.x) : selection_rect.position.x + selection_rect.size.width - mp.x;
} else { // Left Handle
const int handle_adjust = Math::round((scaling_selection_scale.x >= 0 ? (selection_rect.position.x + selection_rect.size.width) : selection_rect.position.x) - mp.x);
mp.x -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.x * SIGN(handle_adjust);
const float x = editor->snap_time((mp.x - limit) / timeline->get_zoom_scale(), true) + timeline->get_value();
if (editor->is_snap_keys_enabled()) {
mp.x = (x - timeline->get_value()) * timeline->get_zoom_scale() + limit;
}
rel_pos.x = scaling_selection_scale.x >= 0 ? (selection_rect.position.x + selection_rect.size.width - mp.x) : (mp.x - selection_rect.position.x);
scaling_selection_offset.x = scaling_selection_pivot.x - x;
}
scaling_selection_scale.x *= rel_pos.x / selection_rect.size.width;
if (scaling_selection_scale.x == 0) {
scaling_selection_scale.x = CMP_EPSILON;
}
}
if (scaling_selection_handles.y != 0) {
if (scaling_selection_handles.y == 1) { // Bottom Handle
const int handle_adjust = Math::round(mp.y - (scaling_selection_scale.y >= 0 ? selection_rect.position.y : (selection_rect.position.y + selection_rect.size.height)));
mp.y -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.y * SIGN(handle_adjust);
if (scaling_selection_scale.y >= 0) {
rel_pos.y = mp.y - selection_rect.position.y;
} else {
rel_pos.y = selection_rect.position.y + selection_rect.size.height - mp.y;
}
} else { // Top Handle
const int handle_adjust = Math::round((scaling_selection_scale.y >= 0 ? (selection_rect.position.y + selection_rect.size.height) : selection_rect.position.y) - mp.y);
mp.y -= MIN(Math::abs(handle_adjust), handle_length) * scaling_selection_handles.y * SIGN(handle_adjust);
if (scaling_selectio
|
ast_based
|
<|fim_prefix|>ine.empty();
if (!is_skip) {
string_process_escapes(line);
output.push_back(line);
}
}
file.close();
return output;
}
//////////////////////////////////////////////////
static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
auto * cb_data = (callback_data *) user_data;
static const char * l_out_name = "l_out";
const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;
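    // When ask is true, the scheduler is only querying whether this tensor's data should be observed; request l_out tensors only.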
if (ask) {
return is_l_out;
}
if (!is_l_out || t->ne[1] != cb_data->n_tokens) {
return true;
}
// save the tensor to current context
cb_data->save_tensor_for_layer(t);
return true;
}
static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
llama_memory_clear(llama_get_memory(ctx), true);
if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
fprintf(stderr, "%s : failed to eval\n", __func__);
return false;
}
return true;
}
static void export_gguf(const std::vector<struct ggml_tensor *> & v_ctrl, const std::string fname, const std::string model_hint) {
struct gguf_context * ctx = gguf_init_empty();
const std::string arch = "controlvector";
gguf_set_val_str(ctx, "general.architecture", arch.c_str());
gguf_set_val_str(ctx, (arch + ".model_hint").c_str(), model_hint.c_str());
gguf_set_val_i32(ctx, (arch + ".layer_count").c_str(), v_ctrl.size());
for (size_t i = 0; i < v_ctrl.size(); ++i) {
gguf_add_tensor(ctx, v_ctrl[i]);
print_debug_tensor(v_ctrl[i]);
printf("Added tensor: %s\n", v_ctrl[i]->name);
}
printf("%s: writing file...\n", __func__);
gguf_write_to_file(ctx, fname.c_str(), false);
printf("%s: wrote file '%s'\n", __func__, fname.c_str());
gguf_free(ctx);
}
/**
 * Load the positive and negative prompt files.
 * Each positive prompt is paired with the negative prompt at the same index to form an entry.
*/
static int prepare_entries(<|fim_suffix|>, train_context & ctx_train) {
// load prompts
std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
if (positive_prompts.size() != negative_prompts.size()) {
fprintf(stderr, "number of positive and negative prompts must be equal\n");
return 1;
}
if (positive_prompts.empty()) {
fprintf(stderr, "must provide at least one prompt pair\n");
return 1;
}
ctx_train.positive_entries = positive_prompts;
ctx_train.negative_entries = negative_prompts;
return 0;
}
int main(int argc, char ** argv) {
common_params params;
params.out_file = "control_vector.gguf";
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) {
return 1;
}
if (params.n_pca_iterations % params.n_pca_batch != 0) {
        fprintf(stderr, "PCA iterations must be a multiple of the PCA batch size\n");
return 1;
}
callback_data cb_data;
// pass the callback to the backend scheduler
// it will be executed for each node during the graph computation
params.cb_eval = cb_eval;
params.cb_eval_user_data = &cb_data;
params.warmup = false;
print_build_info();
llama_backend_init();
llama_numa_init(params.numa);
// load the model to get hparams
common_init_result llama_init = common_init_from_params(params);
llama_model * model = llama_init.model.get();
llama_context * ctx = llama_init.context.get();
// int n_ctx = llama_n_ctx(ctx);
int n_layers = llama_model_n_layer(model);
int n_embd = llama_model_n_embd(model);
// get model hint param (a.k.a model arch name)
char model_hint[128];
llama_model_meta_val_str(model, "general.architecture", model_hint, 128);
// init train_context
train_context ctx_train(n_embd, n_layers);
// load <|fim_middle|>common_params & params
|
ine.empty();
if (!is_skip) {
string_process_escapes(line);
output.push_back(line);
}
}
file.close();
return output;
}
//////////////////////////////////////////////////
static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
auto * cb_data = (callback_data *) user_data;
static const char * l_out_name = "l_out";
const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;
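    // When ask is true, the scheduler is only querying whether this tensor's data should be observed; request l_out tensors only.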
if (ask) {
return is_l_out;
}
if (!is_l_out || t->ne[1] != cb_data->n_tokens) {
return true;
}
// save the tensor to current context
cb_data->save_tensor_for_layer(t);
return true;
}
static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
llama_memory_clear(llama_get_memory(ctx), true);
if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
fprintf(stderr, "%s : failed to eval\n", __func__);
return false;
}
return true;
}
static void export_gguf(const std::vector<struct ggml_tensor *> & v_ctrl, const std::string fname, const std::string model_hint) {
struct gguf_context * ctx = gguf_init_empty();
const std::string arch = "controlvector";
gguf_set_val_str(ctx, "general.architecture", arch.c_str());
gguf_set_val_str(ctx, (arch + ".model_hint").c_str(), model_hint.c_str());
gguf_set_val_i32(ctx, (arch + ".layer_count").c_str(), v_ctrl.size());
for (size_t i = 0; i < v_ctrl.size(); ++i) {
gguf_add_tensor(ctx, v_ctrl[i]);
print_debug_tensor(v_ctrl[i]);
printf("Added tensor: %s\n", v_ctrl[i]->name);
}
printf("%s: writing file...\n", __func__);
gguf_write_to_file(ctx, fname.c_str(), false);
printf("%s: wrote file '%s'\n", __func__, fname.c_str());
gguf_free(ctx);
}
/**
 * Load the positive and negative prompt files.
 * Each positive prompt is paired with the negative prompt at the same index to form an entry.
*/
static int prepare_entries(
|
common_params & params
|
, train_context & ctx_train) {
// load prompts
std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
if (positive_prompts.size() != negative_prompts.size()) {
fprintf(stderr, "number of positive and negative prompts must be equal\n");
return 1;
}
if (positive_prompts.empty()) {
fprintf(stderr, "must provide at least one prompt pair\n");
return 1;
}
ctx_train.positive_entries = positive_prompts;
ctx_train.negative_entries = negative_prompts;
return 0;
}
int main(int argc, char ** argv) {
common_params params;
params.out_file = "control_vector.gguf";
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) {
return 1;
}
if (params.n_pca_iterations % params.n_pca_batch != 0) {
        fprintf(stderr, "PCA iterations must be a multiple of the PCA batch size\n");
return 1;
}
callback_data cb_data;
// pass the callback to the backend scheduler
// it will be executed for each node during the graph computation
params.cb_eval = cb_eval;
params.cb_eval_user_data = &cb_data;
params.warmup = false;
print_build_info();
llama_backend_init();
llama_numa_init(params.numa);
// load the model to get hparams
common_init_result llama_init = common_init_from_params(params);
llama_model * model = llama_init.model.get();
llama_context * ctx = llama_init.context.get();
// int n_ctx = llama_n_ctx(ctx);
int n_layers = llama_model_n_layer(model);
int n_embd = llama_model_n_embd(model);
// get model hint param (a.k.a model arch name)
char model_hint[128];
llama_model_meta_val_str(model, "general.architecture", model_hint, 128);
// init train_context
train_context ctx_train(n_embd, n_layers);
// load
|
ast_based
|
<|fim_prefix|> sel.anchor.character_index = start_pos;
sel.focus.node = (accesskit_node_id)end_rid.get_id();
sel.focus.character_index = end_pos;
accesskit_node_set_text_selection(ae->node, sel);
}
void AccessibilityDriverAccessKit::accessibility_update_set_flag(const RID &p_id, DisplayServer::AccessibilityFlags p_flag, bool p_value) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
switch (p_flag) {
case DisplayServer::AccessibilityFlags::FLAG_HIDDEN: {
if (p_value) {
accesskit_node_set_hidden(ae->node);
} else {
accesskit_node_clear_hidden(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_MULTISELECTABLE: {
if (p_value) {
accesskit_node_set_multiselectable(ae->node);
} else {
accesskit_node_clear_multiselectable(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_REQUIRED: {
if (p_value) {
accesskit_node_set_required(ae->node);
} else {
accesskit_node_clear_required(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_VISITED: {
if (p_value) {
accesskit_node_set_visited(ae->node);
} else {
accesskit_node_clear_visited(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_BUSY: {
if (p_value) {
accesskit_node_set_busy(ae->node);
} else {
accesskit_node_clear_busy(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_MODAL: {
if (p_value) {
accesskit_node_set_modal(ae->node);
} else {
accesskit_node_clear_modal(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_TOUCH_PASSTHROUGH: {
if (p_value) {
accesskit_node_set_touch_transparent(ae->node);
} else {
accesskit_node_clear_touch_transparent(ae->node);
}
} break;<|fim_suffix|> } else {
accesskit_node_clear_disabled(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_CLIPS_CHILDREN: {
if (p_value) {
accesskit_node_set_clips_children(ae->node);
} else {
accesskit_node_clear_clips_children(ae->node);
}
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_classname(const RID &p_id, const String &p_classname) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_classname.is_empty()) {
accesskit_node_set_class_name(ae->node, p_classname.utf8().ptr());
} else {
accesskit_node_clear_class_name(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_placeholder(const RID &p_id, const String &p_placeholder) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_placeholder.is_empty()) {
accesskit_node_set_placeholder(ae->node, p_placeholder.utf8().ptr());
} else {
accesskit_node_clear_placeholder(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_language(const RID &p_id, const String &p_language) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_language(ae->node, p_language.utf8().ptr());
}
void AccessibilityDriverAccessKit::accessibility_update_set_text_orientation(const RID &p_id, bool p_vertical) {<|fim_middle|> case DisplayServer::AccessibilityFlags::FLAG_READONLY: {
if (p_value) {
accesskit_node_set_read_only(ae->node);
} else {
accesskit_node_clear_read_only(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_DISABLED: {
if (p_value) {
accesskit_node_set_disabled(ae->node);
|
sel.anchor.character_index = start_pos;
sel.focus.node = (accesskit_node_id)end_rid.get_id();
sel.focus.character_index = end_pos;
accesskit_node_set_text_selection(ae->node, sel);
}
void AccessibilityDriverAccessKit::accessibility_update_set_flag(const RID &p_id, DisplayServer::AccessibilityFlags p_flag, bool p_value) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
switch (p_flag) {
case DisplayServer::AccessibilityFlags::FLAG_HIDDEN: {
if (p_value) {
accesskit_node_set_hidden(ae->node);
} else {
accesskit_node_clear_hidden(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_MULTISELECTABLE: {
if (p_value) {
accesskit_node_set_multiselectable(ae->node);
} else {
accesskit_node_clear_multiselectable(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_REQUIRED: {
if (p_value) {
accesskit_node_set_required(ae->node);
} else {
accesskit_node_clear_required(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_VISITED: {
if (p_value) {
accesskit_node_set_visited(ae->node);
} else {
accesskit_node_clear_visited(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_BUSY: {
if (p_value) {
accesskit_node_set_busy(ae->node);
} else {
accesskit_node_clear_busy(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_MODAL: {
if (p_value) {
accesskit_node_set_modal(ae->node);
} else {
accesskit_node_clear_modal(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_TOUCH_PASSTHROUGH: {
if (p_value) {
accesskit_node_set_touch_transparent(ae->node);
} else {
accesskit_node_clear_touch_transparent(ae->node);
}
} break;
|
case DisplayServer::AccessibilityFlags::FLAG_READONLY: {
if (p_value) {
accesskit_node_set_read_only(ae->node);
} else {
accesskit_node_clear_read_only(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_DISABLED: {
if (p_value) {
accesskit_node_set_disabled(ae->node);
|
} else {
accesskit_node_clear_disabled(ae->node);
}
} break;
case DisplayServer::AccessibilityFlags::FLAG_CLIPS_CHILDREN: {
if (p_value) {
accesskit_node_set_clips_children(ae->node);
} else {
accesskit_node_clear_clips_children(ae->node);
}
} break;
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_classname(const RID &p_id, const String &p_classname) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_classname.is_empty()) {
accesskit_node_set_class_name(ae->node, p_classname.utf8().ptr());
} else {
accesskit_node_clear_class_name(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_placeholder(const RID &p_id, const String &p_placeholder) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
if (!p_placeholder.is_empty()) {
accesskit_node_set_placeholder(ae->node, p_placeholder.utf8().ptr());
} else {
accesskit_node_clear_placeholder(ae->node);
}
}
void AccessibilityDriverAccessKit::accessibility_update_set_language(const RID &p_id, const String &p_language) {
ERR_FAIL_COND_MSG(!in_accessibility_update, "Accessibility updates are only allowed inside the NOTIFICATION_ACCESSIBILITY_UPDATE notification.");
AccessibilityElement *ae = rid_owner.get_or_null(p_id);
ERR_FAIL_NULL(ae);
_ensure_node(p_id, ae);
accesskit_node_set_language(ae->node, p_language.utf8().ptr());
}
void AccessibilityDriverAccessKit::accessibility_update_set_text_orientation(const RID &p_id, bool p_vertical) {
|
random
|
<|fim_prefix|>/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/aot/aot_only_var_handle_op.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
namespace {
// Implementation of varhandle that binds a VarHandleOp to an XlaResource of the
// same name. It is not safe to use this op in a JIT context.
class XlaAotOnlyVarHandleOp : public XlaOpKernel {
public:
explicit XlaAotOnlyVarHandleOp(OpKernelConstruction* c);
void Compile(XlaOpKernelContext* context) override;
private:
string name_;
};
XlaAotOnlyVarHandleOp::XlaAotOnlyVarHandleOp(OpKernelConstruction* c)
: XlaOpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("shared_name", &name_));
}
void XlaAotOnlyVarHandleOp::Compile(<|fim_suffix|>) {
// Look for a resource of the same name. TF also keys that on the container
// and type attributes, but that doesn't seem necessary.
for (const auto& resource : context->xla_context()->resources()) {
if (resource->kind() == XlaResource::kVariable &&
resource->name() == name_) {
context->SetResourceOutput(0, resource.get());
return;
}
}
context->SetStatus(
errors::InvalidArgument("Variable: ", name_, " not configured"));
}
} // namespace
REGISTER_OP(tfcompile::kXlaAotOnlyVarHandleOp)
.Doc(R"doc(
Internal VarHandleOp registration used for XLA AOT compilation.
)doc")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("debug_name: string = ''")
.Attr("dtype: type")
.Attr("shape: shape")
.Output("resource: resource")
.SetIsStateful()
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->Scalar());
DataType t;
TF_RETURN_IF_ERROR(c->GetAttr("dtype", &t));
PartialTensorShape p;
TF_RETURN_IF_ERROR(c->GetAttr("shape", &p));
shape_inference::ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(p, &s));
c->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{{s, t}});
return absl::OkStatus();
});
REGISTER_XLA_OP(Name(tfcompile::kXlaAotOnlyVarHandleOp).CompilationOnly(),
XlaAotOnlyVarHandleOp);
} // namespace tensorflow
<|fim_middle|>XlaOpKernelContext* context
|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/aot/aot_only_var_handle_op.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
namespace {
// Implementation of varhandle that binds a VarHandleOp to an XlaResource of the
// same name. It is not safe to use this op in a JIT context.
class XlaAotOnlyVarHandleOp : public XlaOpKernel {
public:
explicit XlaAotOnlyVarHandleOp(OpKernelConstruction* c);
void Compile(XlaOpKernelContext* context) override;
private:
string name_;
};
XlaAotOnlyVarHandleOp::XlaAotOnlyVarHandleOp(OpKernelConstruction* c)
: XlaOpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("shared_name", &name_));
}
void XlaAotOnlyVarHandleOp::Compile(
|
XlaOpKernelContext* context
|
) {
// Look for a resource of the same name. TF also keys that on the container
// and type attributes, but that doesn't seem necessary.
for (const auto& resource : context->xla_context()->resources()) {
if (resource->kind() == XlaResource::kVariable &&
resource->name() == name_) {
context->SetResourceOutput(0, resource.get());
return;
}
}
context->SetStatus(
errors::InvalidArgument("Variable: ", name_, " not configured"));
}
} // namespace
REGISTER_OP(tfcompile::kXlaAotOnlyVarHandleOp)
.Doc(R"doc(
Internal VarHandleOp registration used for XLA AOT compilation.
)doc")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("debug_name: string = ''")
.Attr("dtype: type")
.Attr("shape: shape")
.Output("resource: resource")
.SetIsStateful()
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->Scalar());
DataType t;
TF_RETURN_IF_ERROR(c->GetAttr("dtype", &t));
PartialTensorShape p;
TF_RETURN_IF_ERROR(c->GetAttr("shape", &p));
shape_inference::ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(p, &s));
c->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{{s, t}});
return absl::OkStatus();
});
REGISTER_XLA_OP(Name(tfcompile::kXlaAotOnlyVarHandleOp).CompilationOnly(),
XlaAotOnlyVarHandleOp);
} // namespace tensorflow
|
ast_based
|
<|fim_prefix|>int block_num = 0;
int par_num = 0;
int line_num = 0;
int word_num = 0;
std::string tsv_str;
tsv_str += "1\t" + std::to_string(page_num); // level 1 - page
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(rect_left_);
tsv_str += "\t" + std::to_string(rect_top_);
tsv_str += "\t" + std::to_string(rect_width_);
tsv_str += "\t" + std::to_string(rect_height_);
tsv_str += "\t-1\t\n";
const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
}
// Add rows for any new block/paragraph/textline.
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
block_num++;
par_num = 0;
line_num = 0;
word_num = 0;
tsv_str += "2\t" + std::to_string(page_num); // level 2 - block
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for block
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
par_num++;
line_num = 0;
word_num = 0;
tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for para
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
line_num++;
word_num = 0;
tsv_str += "4\t" + std::to_string(page_num); // level 4 - line
tsv_str += "\t" + <|fim_suffix|>;
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_TEXTLINE, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for line
}
// Now, process the word...
int left, top, right, bottom;
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
word_num++;
tsv_str += "5\t" + std::to_string(page_num); // level 5 - word
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(left);
tsv_str += "\t" + std::to_string(top);
tsv_str += "\t" + std::to_string(right - left);
tsv_str += "\t" + std::to_string(bottom - top);
tsv_str += "\t" + std::to_string(res_it->Confidence(RIL_WORD));
tsv_str += "\t";
#if !defined(NDEBUG)
// Increment counts if at end of block/paragraph/textline.
if (res_it->IsAtFinalElement(RIL_TEXTLINE, RIL_WORD)) {
lcnt++;
}
if (res_it->IsAtFinalElement(RIL_PARA, RIL_WORD)) {
pcnt++;
}
if (res_it->IsAtFinalElement(RIL_BLOCK, RIL_WORD)) {
bcnt++;
}
#endif
do {
tsv_str += std::unique_ptr<const char[]>(res_it->GetUTF8Text(RIL_SYMBOL)).get();
res_it->Next(RIL_SYMBOL);
} while (!res_it->Empty(RIL_BLOCK) && !res_it->IsAtBeginningOf(RIL_WORD));
tsv_str += "\n"; // end of row
#if !defined(NDEBUG)
wcnt++;
#endif
}
return copy_string(tsv_str);
}
/** The 5 numbers output for each box (the usual 4 and a page number.) */
const int kNumbersPerBlob = 5;
/**
* The number of bytes taken by each number. Since we use int16_t for ICOORD,
* assume only 5 digits max.
*/
const int kBytesPerNumber = 5;
/**
* Multiplier for max expected textlength assumes (kBytesPerNumber + space)
* * kNumbersPerBlob plus the newline. Add to this the
* original UTF<|fim_middle|>std::to_string(block_num)
|
int block_num = 0;
int par_num = 0;
int line_num = 0;
int word_num = 0;
std::string tsv_str;
tsv_str += "1\t" + std::to_string(page_num); // level 1 - page
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(rect_left_);
tsv_str += "\t" + std::to_string(rect_top_);
tsv_str += "\t" + std::to_string(rect_width_);
tsv_str += "\t" + std::to_string(rect_height_);
tsv_str += "\t-1\t\n";
const std::unique_ptr</*non-const*/ ResultIterator> res_it(GetIterator());
while (!res_it->Empty(RIL_BLOCK)) {
if (res_it->Empty(RIL_WORD)) {
res_it->Next(RIL_WORD);
continue;
}
// Add rows for any new block/paragraph/textline.
if (res_it->IsAtBeginningOf(RIL_BLOCK)) {
block_num++;
par_num = 0;
line_num = 0;
word_num = 0;
tsv_str += "2\t" + std::to_string(page_num); // level 2 - block
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_BLOCK, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for block
}
if (res_it->IsAtBeginningOf(RIL_PARA)) {
par_num++;
line_num = 0;
word_num = 0;
tsv_str += "3\t" + std::to_string(page_num); // level 3 - paragraph
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_PARA, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for para
}
if (res_it->IsAtBeginningOf(RIL_TEXTLINE)) {
line_num++;
word_num = 0;
tsv_str += "4\t" + std::to_string(page_num); // level 4 - line
tsv_str += "\t" +
|
std::to_string(block_num)
|
;
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
AddBoxToTSV(res_it.get(), RIL_TEXTLINE, tsv_str);
tsv_str += "\t-1\t\n"; // end of row for line
}
// Now, process the word...
int left, top, right, bottom;
res_it->BoundingBox(RIL_WORD, &left, &top, &right, &bottom);
word_num++;
tsv_str += "5\t" + std::to_string(page_num); // level 5 - word
tsv_str += "\t" + std::to_string(block_num);
tsv_str += "\t" + std::to_string(par_num);
tsv_str += "\t" + std::to_string(line_num);
tsv_str += "\t" + std::to_string(word_num);
tsv_str += "\t" + std::to_string(left);
tsv_str += "\t" + std::to_string(top);
tsv_str += "\t" + std::to_string(right - left);
tsv_str += "\t" + std::to_string(bottom - top);
tsv_str += "\t" + std::to_string(res_it->Confidence(RIL_WORD));
tsv_str += "\t";
#if !defined(NDEBUG)
// Increment counts if at end of block/paragraph/textline.
if (res_it->IsAtFinalElement(RIL_TEXTLINE, RIL_WORD)) {
lcnt++;
}
if (res_it->IsAtFinalElement(RIL_PARA, RIL_WORD)) {
pcnt++;
}
if (res_it->IsAtFinalElement(RIL_BLOCK, RIL_WORD)) {
bcnt++;
}
#endif
do {
tsv_str += std::unique_ptr<const char[]>(res_it->GetUTF8Text(RIL_SYMBOL)).get();
res_it->Next(RIL_SYMBOL);
} while (!res_it->Empty(RIL_BLOCK) && !res_it->IsAtBeginningOf(RIL_WORD));
tsv_str += "\n"; // end of row
#if !defined(NDEBUG)
wcnt++;
#endif
}
return copy_string(tsv_str);
}
/** The 5 numbers output for each box (the usual 4 and a page number.) */
const int kNumbersPerBlob = 5;
/**
* The number of bytes taken by each number. Since we use int16_t for ICOORD,
* assume only 5 digits max.
*/
const int kBytesPerNumber = 5;
/**
* Multiplier for max expected textlength assumes (kBytesPerNumber + space)
* * kNumbersPerBlob plus the newline. Add to this the
* original UTF
|
ast_based
|
<|fim_prefix|>hint_string` is required for `PROPERTY_HINT_RANGE`.
p_property.hint_string = "0,0,1";
}
p_property.usage |= PROPERTY_USAGE_KEYING_INCREMENTS;
}
}
void AnimatedSprite2D::_notification(int p_what) {
switch (p_what) {
case NOTIFICATION_ACCESSIBILITY_UPDATE: {
RID ae = get_accessibility_element();
ERR_FAIL_COND(ae.is_null());
Rect2 dst_rect = _get_rect();
DisplayServer::get_singleton()->accessibility_update_set_role(ae, DisplayServer::AccessibilityRole::ROLE_IMAGE);
DisplayServer::get_singleton()->accessibility_update_set_transform(ae, get_transform());
DisplayServer::get_singleton()->accessibility_update_set_bounds(ae, dst_rect);
} break;
case NOTIFICATION_READY: {
if (!Engine::get_singleton()->is_editor_hint() && frames.is_valid() && frames->has_animation(autoplay)) {
play(autoplay);
}
} break;
case NOTIFICATION_INTERNAL_PROCESS: {
if (frames.is_null() || !frames->has_animation(animation)) {
return;
}
double remaining = get_process_delta_time();
int i = 0;
while (remaining) {
// Animation speed may be changed by animation_finished or frame_changed signals.
double speed = frames->get_animation_speed(animation) * speed_scale * custom_speed_scale * frame_speed_scale;
double abs_speed = Math::abs(speed);
if (speed == 0) {
return; // Do nothing.
}
// Frame count may be changed by animation_finished or frame_changed signals.
int fc = frames->get_frame_count(animation);
int last_frame = fc - 1;
if (!std::signbit(speed)) {
// Forwards.
if (frame_progress >= 1.0) {
if (frame >= last_frame) {
if (frames->get_animation_loop(animation)) {
frame = 0;
emit_signal("animation_looped");
} else {
frame = last_frame;
pause();
emit_signal(SceneStringName(animation_finished));
return;
}
} else {
frame++;
}
_calc_frame_speed_scale();
frame_progress = 0.0;
<|fim_suffix|>
emit_signal(SceneStringName(frame_changed));
}
double to_process = MIN((1.0 - frame_progress) / abs_speed, remaining);
frame_progress += to_process * abs_speed;
remaining -= to_process;
} else {
// Backwards.
if (frame_progress <= 0) {
if (frame <= 0) {
if (frames->get_animation_loop(animation)) {
frame = last_frame;
emit_signal("animation_looped");
} else {
frame = 0;
pause();
emit_signal(SceneStringName(animation_finished));
return;
}
} else {
frame--;
}
_calc_frame_speed_scale();
frame_progress = 1.0;
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
double to_process = MIN(frame_progress / abs_speed, remaining);
frame_progress -= to_process * abs_speed;
remaining -= to_process;
}
i++;
if (i > fc) {
return; // Prevents freezing if to_process is each time much less than remaining.
}
}
} break;
case NOTIFICATION_DRAW: {
if (frames.is_null() || !frames->has_animation(animation)) {
return;
}
Ref<Texture2D> texture = frames->get_frame_texture(animation, frame);
if (texture.is_null()) {
return;
}
RID ci = get_canvas_item();
Size2 s = texture->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (get_viewport() && get_viewport()->is_snap_2d_transforms_to_pixel_enabled()) {
ofs = (ofs + Point2(0.5, 0.5)).floor();
}
Rect2 dst_rect(ofs, s);
if (hflip) {
dst_rect.size.x = -dst_rect.size.x;
}
if (vflip) {
dst_rect.size.y = -dst_rect.size.y;
}
texture->draw_rect_region(ci, dst_rect, Rect2(Vector2(), texture->get_size()), Color(1, 1, 1), false);
} break;
}
}
void AnimatedSprite2D::set_sprite_frames(const Ref<SpriteFrames> &p_frames) {
if (frames == p_frames) {
return;
}
if (frames.is_valid()) {
frames->disconnect(CoreStringName(changed), callable_mp<|fim_middle|>queue_redraw();
|
hint_string` is required for `PROPERTY_HINT_RANGE`.
p_property.hint_string = "0,0,1";
}
p_property.usage |= PROPERTY_USAGE_KEYING_INCREMENTS;
}
}
void AnimatedSprite2D::_notification(int p_what) {
switch (p_what) {
case NOTIFICATION_ACCESSIBILITY_UPDATE: {
RID ae = get_accessibility_element();
ERR_FAIL_COND(ae.is_null());
Rect2 dst_rect = _get_rect();
DisplayServer::get_singleton()->accessibility_update_set_role(ae, DisplayServer::AccessibilityRole::ROLE_IMAGE);
DisplayServer::get_singleton()->accessibility_update_set_transform(ae, get_transform());
DisplayServer::get_singleton()->accessibility_update_set_bounds(ae, dst_rect);
} break;
case NOTIFICATION_READY: {
if (!Engine::get_singleton()->is_editor_hint() && frames.is_valid() && frames->has_animation(autoplay)) {
play(autoplay);
}
} break;
case NOTIFICATION_INTERNAL_PROCESS: {
if (frames.is_null() || !frames->has_animation(animation)) {
return;
}
double remaining = get_process_delta_time();
int i = 0;
while (remaining) {
// Animation speed may be changed by animation_finished or frame_changed signals.
double speed = frames->get_animation_speed(animation) * speed_scale * custom_speed_scale * frame_speed_scale;
double abs_speed = Math::abs(speed);
if (speed == 0) {
return; // Do nothing.
}
// Frame count may be changed by animation_finished or frame_changed signals.
int fc = frames->get_frame_count(animation);
int last_frame = fc - 1;
if (!std::signbit(speed)) {
// Forwards.
if (frame_progress >= 1.0) {
if (frame >= last_frame) {
if (frames->get_animation_loop(animation)) {
frame = 0;
emit_signal("animation_looped");
} else {
frame = last_frame;
pause();
emit_signal(SceneStringName(animation_finished));
return;
}
} else {
frame++;
}
_calc_frame_speed_scale();
frame_progress = 0.0;
|
queue_redraw();
|
emit_signal(SceneStringName(frame_changed));
}
double to_process = MIN((1.0 - frame_progress) / abs_speed, remaining);
frame_progress += to_process * abs_speed;
remaining -= to_process;
} else {
// Backwards.
if (frame_progress <= 0) {
if (frame <= 0) {
if (frames->get_animation_loop(animation)) {
frame = last_frame;
emit_signal("animation_looped");
} else {
frame = 0;
pause();
emit_signal(SceneStringName(animation_finished));
return;
}
} else {
frame--;
}
_calc_frame_speed_scale();
frame_progress = 1.0;
queue_redraw();
emit_signal(SceneStringName(frame_changed));
}
double to_process = MIN(frame_progress / abs_speed, remaining);
frame_progress -= to_process * abs_speed;
remaining -= to_process;
}
i++;
if (i > fc) {
return; // Prevents freezing if to_process is each time much less than remaining.
}
}
} break;
case NOTIFICATION_DRAW: {
if (frames.is_null() || !frames->has_animation(animation)) {
return;
}
Ref<Texture2D> texture = frames->get_frame_texture(animation, frame);
if (texture.is_null()) {
return;
}
RID ci = get_canvas_item();
Size2 s = texture->get_size();
Point2 ofs = offset;
if (centered) {
ofs -= s / 2;
}
if (get_viewport() && get_viewport()->is_snap_2d_transforms_to_pixel_enabled()) {
ofs = (ofs + Point2(0.5, 0.5)).floor();
}
Rect2 dst_rect(ofs, s);
if (hflip) {
dst_rect.size.x = -dst_rect.size.x;
}
if (vflip) {
dst_rect.size.y = -dst_rect.size.y;
}
texture->draw_rect_region(ci, dst_rect, Rect2(Vector2(), texture->get_size()), Color(1, 1, 1), false);
} break;
}
}
void AnimatedSprite2D::set_sprite_frames(const Ref<SpriteFrames> &p_frames) {
if (frames == p_frames) {
return;
}
if (frames.is_valid()) {
frames->disconnect(CoreStringName(changed), callable_mp
|
ast_based
|
<|fim_prefix|>{
return mConfIntervalsState;
}
bool calib::calibController::getRMSState() const
{
return mCalibData->totalAvgErr < 0.5;
}
int calib::calibController::getNewFlags() const
{
return mCalibFlags;
}
//////////////////// calibDataController
double calib::calibDataController::estimateGridSubsetQuality(size_t excludedIndex)
{
{
int gridSize = 10;
int xGridStep = mCalibData->imageSize.width / gridSize;
int yGridStep = mCalibData->imageSize.height / gridSize;
std::vector<int> pointsInCell(gridSize*gridSize);
std::fill(pointsInCell.begin(), pointsInCell.end(), 0);
for(size_t k = 0; k < mCalibData->imagePoints.size(); k++)
if(k != excludedIndex)
for(std::vector<cv::Point2f>::iterator pointIt = mCalibData->imagePoints[k].begin(); pointIt != mCalibData->imagePoints[k].end(); ++pointIt) {
int i = (int)((*pointIt).x / xGridStep);
int j = (int)((*pointIt).y / yGridStep);
pointsInCell[i*gridSize + j]++;
}
for(size_t k = 0; k < mCalibData->allCharucoCorners.size(); k++)
if(k != excludedIndex)
for(int l = 0; l < mCalibData->allCharucoCorners[k].size[0]; l++) {
int i = (int)(mCalibData->allCharucoCorners[k].at<float>(l, 0) / xGridStep);
int j = (int)(mCalibData->allCharucoCorners[k].at<float>(l, 1) / yGridStep);
pointsInCell[i*gridSize + j]++;
}
cv::Mat mean, stdDev;
cv::meanStdDev(pointsInCell, mean, stdDev);
return mean.at<double>(0) / (stdDev.at<double>(0) + 1e-7);
}
}
calib::calibDataController::calibDataController(cv::Ptr<calib::calibrationData> data, int maxFrames, double convParameter) :
mCalibData(data), mParamsFileName("CamParams.xml")
{
mMaxFramesNum = maxFrames;
mAlpha = convParameter;
}<|fim_suffix|>{
size_t numberOfFrames = std::max(mCalibData->allCharucoIds.size(), mCalibData->imagePoints.size());
CV_Assert(numberOfFrames == mCalibData->perViewErrors.total());
if(numberOfFrames >= mMaxFramesNum) {
double worstValue = -HUGE_VAL, maxQuality = estimateGridSubsetQuality(numberOfFrames);
size_t worstElemIndex = 0;
for(size_t i = 0; i < numberOfFrames; i++) {
double gridQDelta = estimateGridSubsetQuality(i) - maxQuality;
double currentValue = mCalibData->perViewErrors.at<double>((int)i)*mAlpha + gridQDelta*(1. - mAlpha);
if(currentValue > worstValue) {
worstValue = currentValue;
worstElemIndex = i;
}
}
showOverlayMessage(cv::format("Frame %zu is worst", worstElemIndex + 1));
if(mCalibData->allFrames.size())
mCalibData->allFrames.erase(mCalibData->allFrames.begin() + worstElemIndex);
if(mCalibData->imagePoints.size()) {
mCalibData->imagePoints.erase(mCalibData->imagePoints.begin() + worstElemIndex);
mCalibData->objectPoints.erase(mCalibData->objectPoints.begin() + worstElemIndex);
if (mCalibData->allCharucoCorners.size()) {
mCalibData->allCharucoCorners.erase(mCalibData->allCharucoCorners.begin() + worstElemIndex);
mCalibData->allCharucoIds.erase(mCalibData->allCharucoIds.begin() + worstElemIndex);
}
}
cv::Mat newErrorsVec = cv::Mat((int)numberOfFrames - 1, 1, CV_64F);
std::copy(mCalibData->perViewErrors.ptr<double>(0),
mCalibData->perViewErrors.ptr<double>((int)worstElemIndex), newErrorsVec.ptr<double>(0));
if((int)worstElemIndex < (int)numberOfFrames-1) {
std::copy(mCalibData->perViewErrors.ptr<double>((int)worstElemIndex + 1), mCalibData->perViewErrors.ptr<double>((int)numberOfFrames),
newErrorsVec.ptr<double>((int)worstElemIndex));
}<|fim_middle|>
calib::calibDataController::calibDataController()
{
}
void calib::calibDataController::filterFrames()
|
{
return mConfIntervalsState;
}
bool calib::calibController::getRMSState() const
{
return mCalibData->totalAvgErr < 0.5;
}
int calib::calibController::getNewFlags() const
{
return mCalibFlags;
}
//////////////////// calibDataController
double calib::calibDataController::estimateGridSubsetQuality(size_t excludedIndex)
{
{
int gridSize = 10;
int xGridStep = mCalibData->imageSize.width / gridSize;
int yGridStep = mCalibData->imageSize.height / gridSize;
std::vector<int> pointsInCell(gridSize*gridSize);
std::fill(pointsInCell.begin(), pointsInCell.end(), 0);
for(size_t k = 0; k < mCalibData->imagePoints.size(); k++)
if(k != excludedIndex)
for(std::vector<cv::Point2f>::iterator pointIt = mCalibData->imagePoints[k].begin(); pointIt != mCalibData->imagePoints[k].end(); ++pointIt) {
int i = (int)((*pointIt).x / xGridStep);
int j = (int)((*pointIt).y / yGridStep);
pointsInCell[i*gridSize + j]++;
}
for(size_t k = 0; k < mCalibData->allCharucoCorners.size(); k++)
if(k != excludedIndex)
for(int l = 0; l < mCalibData->allCharucoCorners[k].size[0]; l++) {
int i = (int)(mCalibData->allCharucoCorners[k].at<float>(l, 0) / xGridStep);
int j = (int)(mCalibData->allCharucoCorners[k].at<float>(l, 1) / yGridStep);
pointsInCell[i*gridSize + j]++;
}
cv::Mat mean, stdDev;
cv::meanStdDev(pointsInCell, mean, stdDev);
return mean.at<double>(0) / (stdDev.at<double>(0) + 1e-7);
}
}
calib::calibDataController::calibDataController(cv::Ptr<calib::calibrationData> data, int maxFrames, double convParameter) :
mCalibData(data), mParamsFileName("CamParams.xml")
{
mMaxFramesNum = maxFrames;
mAlpha = convParameter;
}
|
calib::calibDataController::calibDataController()
{
}
void calib::calibDataController::filterFrames()
|
{
size_t numberOfFrames = std::max(mCalibData->allCharucoIds.size(), mCalibData->imagePoints.size());
CV_Assert(numberOfFrames == mCalibData->perViewErrors.total());
if(numberOfFrames >= mMaxFramesNum) {
double worstValue = -HUGE_VAL, maxQuality = estimateGridSubsetQuality(numberOfFrames);
size_t worstElemIndex = 0;
for(size_t i = 0; i < numberOfFrames; i++) {
double gridQDelta = estimateGridSubsetQuality(i) - maxQuality;
double currentValue = mCalibData->perViewErrors.at<double>((int)i)*mAlpha + gridQDelta*(1. - mAlpha);
if(currentValue > worstValue) {
worstValue = currentValue;
worstElemIndex = i;
}
}
showOverlayMessage(cv::format("Frame %zu is worst", worstElemIndex + 1));
if(mCalibData->allFrames.size())
mCalibData->allFrames.erase(mCalibData->allFrames.begin() + worstElemIndex);
if(mCalibData->imagePoints.size()) {
mCalibData->imagePoints.erase(mCalibData->imagePoints.begin() + worstElemIndex);
mCalibData->objectPoints.erase(mCalibData->objectPoints.begin() + worstElemIndex);
if (mCalibData->allCharucoCorners.size()) {
mCalibData->allCharucoCorners.erase(mCalibData->allCharucoCorners.begin() + worstElemIndex);
mCalibData->allCharucoIds.erase(mCalibData->allCharucoIds.begin() + worstElemIndex);
}
}
cv::Mat newErrorsVec = cv::Mat((int)numberOfFrames - 1, 1, CV_64F);
std::copy(mCalibData->perViewErrors.ptr<double>(0),
mCalibData->perViewErrors.ptr<double>((int)worstElemIndex), newErrorsVec.ptr<double>(0));
if((int)worstElemIndex < (int)numberOfFrames-1) {
std::copy(mCalibData->perViewErrors.ptr<double>((int)worstElemIndex + 1), mCalibData->perViewErrors.ptr<double>((int)numberOfFrames),
newErrorsVec.ptr<double>((int)worstElemIndex));
}
|
random
|
<|fim_prefix|> auto decoded = hex::crypt::decode16(encoded);
TEST_ASSERT(decoded == original, "decoded: {} encoded: '{}' original: {}", decoded, encoded, original);
}
if (hex::crypt::encode16({ 0x00, 0x2a }) == "2A") {
        hex::log::error("Known bug: in function hex::crypt::encode16 mbedtls_mpi_read_binary ignores initial null bytes");
TEST_FAIL();
}
TEST_SUCCESS();
};
std::string vectorToString(std::vector<u8> in) {
return std::string(reinterpret_cast<char *>(in.data()), in.size());
}
std::vector<u8> stringToVector(std::string in) {
return std::vector<u8>(in.begin(), in.end());
}
TEST_SEQUENCE("EncodeDecode64") {
std::array golden_samples = {
// source: linux command base64 (from GNU coreutils)
EncodeChek {{}, "" },
EncodeChek { { 0x2a }, "Kg==" },
EncodeChek { { 0x00, 0x2a }, "ACo=" },
EncodeChek { { 0x2a, 0x00 }, "KgA=" },
EncodeChek { { 0x42, 0xff, 0x55 }, "Qv9V" },
EncodeChek { { 0xde, 0xad, 0xbe, 0xef, 0x42, 0x2a, 0x00, 0xff }, "3q2+70IqAP8="},
};
for (auto &i : golden_samples) {
std::string string;
TEST_ASSERT((string = vectorToString(hex::crypt::encode64(i.vec))) == i.string, "string: '{}' i.string: '{}' from: {}", string, i.string, i.vec);
std::vector<u8> vec;
TEST_ASSERT((vec = hex::crypt::decode64(stringToVector(i.string))) == i.vec, "vec: {} i.vec: {} from: '{}'", vec, i.vec, i.string);
}
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution dataLen(0, 1024);
std::uniform_int_distribution<u8> data;
for (int i = 0; i < 1000; i++) {
std::vector<u8> original(dataLen(gen));
std::generate(std::begin(original), std::end(original), [&] { <|fim_suffix|> });
auto encoded = vectorToString(hex::crypt::encode64(original));
auto decoded = hex::crypt::decode64(stringToVector(encoded));
TEST_ASSERT(decoded == original, "decoded: {} encoded: '{}' original: {}", decoded, encoded, original);
}
TEST_SUCCESS();
};
TEST_SEQUENCE("EncodeDecodeLEB128") {
TEST_ASSERT(hex::crypt::encodeUleb128(0) == (std::vector<u8>{ 0 }));
TEST_ASSERT(hex::crypt::encodeUleb128(0x7F) == (std::vector<u8>{ 0x7F }));
TEST_ASSERT(hex::crypt::encodeUleb128(0xFF) == (std::vector<u8>{ 0xFF, 0x01 }));
TEST_ASSERT(hex::crypt::encodeUleb128(0xF0F0) == (std::vector<u8>{ 0xF0, 0xE1, 0x03 }));
TEST_ASSERT(hex::crypt::encodeSleb128(0) == (std::vector<u8>{ 0 }));
TEST_ASSERT(hex::crypt::encodeSleb128(0x7F) == (std::vector<u8>{ 0xFF, 0x00 }));
TEST_ASSERT(hex::crypt::encodeSleb128(0xFF) == (std::vector<u8>{ 0xFF, 0x01 }));
TEST_ASSERT(hex::crypt::encodeSleb128(0xF0F0) == (std::vector<u8>{ 0xF0, 0xE1, 0x03 }));
TEST_ASSERT(hex::crypt::encodeSleb128(-1) == (std::vector<u8>{ 0x7F }));
TEST_ASSERT(hex::crypt::encodeSleb128(-128) == (std::vector<u8>{ 0x80, 0x7F }));
TEST_ASSERT(hex::crypt::decodeUleb128({}) == 0);
TEST_ASSERT(hex::crypt::decodeUleb128({ 1 }) == 0x01);
TEST_ASSERT(hex::crypt::decodeUleb128({ 0x7F }) == 0x7F);
TEST_ASSERT(hex::crypt::decodeUleb128({ 0xFF }) == 0x7F);
TEST_ASSERT(hex::crypt::decodeUleb128({ 0xFF, 0x7F }) == 0x3FFF);
TEST_ASSERT(hex::crypt::decodeUleb128({
0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x7F,
}) == ((static_cast<u128>(0xFFFF'FFFF'FFFF) << 64) | 0xFFFF'FFFF'FFFF'FFFF));
TEST_ASSERT(hex::crypt::decodeUleb128({ 0xAA, 0xBB, 0xCC, 0x00, 0xFF }) == 0x131DAA);
TEST_ASSERT(hex::crypt::decodeSleb128({}) == 0);
TEST_ASSERT(hex::crypt::decodeSleb128({ 1 }) == 0x01);
TEST_ASSERT(hex::crypt::decodeSleb128({ 0x3F }) == 0x3F);
TEST_ASSERT(h<|fim_middle|>return data(gen);
|
auto decoded = hex::crypt::decode16(encoded);
TEST_ASSERT(decoded == original, "decoded: {} encoded: '{}' original: {}", decoded, encoded, original);
}
if (hex::crypt::encode16({ 0x00, 0x2a }) == "2A") {
        hex::log::error("Known bug: in function hex::crypt::encode16 mbedtls_mpi_read_binary ignores initial null bytes");
TEST_FAIL();
}
TEST_SUCCESS();
};
std::string vectorToString(std::vector<u8> in) {
return std::string(reinterpret_cast<char *>(in.data()), in.size());
}
std::vector<u8> stringToVector(std::string in) {
return std::vector<u8>(in.begin(), in.end());
}
TEST_SEQUENCE("EncodeDecode64") {
std::array golden_samples = {
// source: linux command base64 (from GNU coreutils)
EncodeChek {{}, "" },
EncodeChek { { 0x2a }, "Kg==" },
EncodeChek { { 0x00, 0x2a }, "ACo=" },
EncodeChek { { 0x2a, 0x00 }, "KgA=" },
EncodeChek { { 0x42, 0xff, 0x55 }, "Qv9V" },
EncodeChek { { 0xde, 0xad, 0xbe, 0xef, 0x42, 0x2a, 0x00, 0xff }, "3q2+70IqAP8="},
};
for (auto &i : golden_samples) {
std::string string;
TEST_ASSERT((string = vectorToString(hex::crypt::encode64(i.vec))) == i.string, "string: '{}' i.string: '{}' from: {}", string, i.string, i.vec);
std::vector<u8> vec;
TEST_ASSERT((vec = hex::crypt::decode64(stringToVector(i.string))) == i.vec, "vec: {} i.vec: {} from: '{}'", vec, i.vec, i.string);
}
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution dataLen(0, 1024);
std::uniform_int_distribution<u8> data;
for (int i = 0; i < 1000; i++) {
std::vector<u8> original(dataLen(gen));
std::generate(std::begin(original), std::end(original), [&] {
|
return data(gen);
|
});
auto encoded = vectorToString(hex::crypt::encode64(original));
auto decoded = hex::crypt::decode64(stringToVector(encoded));
TEST_ASSERT(decoded == original, "decoded: {} encoded: '{}' original: {}", decoded, encoded, original);
}
TEST_SUCCESS();
};
TEST_SEQUENCE("EncodeDecodeLEB128") {
TEST_ASSERT(hex::crypt::encodeUleb128(0) == (std::vector<u8>{ 0 }));
TEST_ASSERT(hex::crypt::encodeUleb128(0x7F) == (std::vector<u8>{ 0x7F }));
TEST_ASSERT(hex::crypt::encodeUleb128(0xFF) == (std::vector<u8>{ 0xFF, 0x01 }));
TEST_ASSERT(hex::crypt::encodeUleb128(0xF0F0) == (std::vector<u8>{ 0xF0, 0xE1, 0x03 }));
TEST_ASSERT(hex::crypt::encodeSleb128(0) == (std::vector<u8>{ 0 }));
TEST_ASSERT(hex::crypt::encodeSleb128(0x7F) == (std::vector<u8>{ 0xFF, 0x00 }));
TEST_ASSERT(hex::crypt::encodeSleb128(0xFF) == (std::vector<u8>{ 0xFF, 0x01 }));
TEST_ASSERT(hex::crypt::encodeSleb128(0xF0F0) == (std::vector<u8>{ 0xF0, 0xE1, 0x03 }));
TEST_ASSERT(hex::crypt::encodeSleb128(-1) == (std::vector<u8>{ 0x7F }));
TEST_ASSERT(hex::crypt::encodeSleb128(-128) == (std::vector<u8>{ 0x80, 0x7F }));
TEST_ASSERT(hex::crypt::decodeUleb128({}) == 0);
TEST_ASSERT(hex::crypt::decodeUleb128({ 1 }) == 0x01);
TEST_ASSERT(hex::crypt::decodeUleb128({ 0x7F }) == 0x7F);
TEST_ASSERT(hex::crypt::decodeUleb128({ 0xFF }) == 0x7F);
TEST_ASSERT(hex::crypt::decodeUleb128({ 0xFF, 0x7F }) == 0x3FFF);
TEST_ASSERT(hex::crypt::decodeUleb128({
0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x7F,
}) == ((static_cast<u128>(0xFFFF'FFFF'FFFF) << 64) | 0xFFFF'FFFF'FFFF'FFFF));
TEST_ASSERT(hex::crypt::decodeUleb128({ 0xAA, 0xBB, 0xCC, 0x00, 0xFF }) == 0x131DAA);
TEST_ASSERT(hex::crypt::decodeSleb128({}) == 0);
TEST_ASSERT(hex::crypt::decodeSleb128({ 1 }) == 0x01);
TEST_ASSERT(hex::crypt::decodeSleb128({ 0x3F }) == 0x3F);
TEST_ASSERT(h
|
ast_based
|
<|fim_prefix|> }
// Allow no more than 8 redirections to prevent endless loops.
curlcode = curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 8);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
int timeout = curl_timeout;
if (timeout > 0) {
curlcode = curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
}
std::string cookiefile = curl_cookiefile;
if (!cookiefile.empty()) {
curlcode = curl_easy_setopt(curl, CURLOPT_COOKIEFILE, cookiefile.c_str());
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
}
curlcode = curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buf);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_USERAGENT, "Tesseract OCR");
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_perform(curl);
if (curlcode != CURLE_OK) {
return error("curl_easy_perform");
}
curl_easy_cleanup(curl);
data = reinterpret_cast<const l_uint8 *>(buf.data());
}
#else
fprintf(stderr, "Error, this tesseract has no URL support\n");
return false;
#endif
} else {
// Check whether the input file can be read.
if (FILE *file = fopen(filename, "rb")) {
fclose(file);
} else {
fprintf(stderr, "Error, cannot read input file %s: %s\n", filename, strerror(errno));
return false;
}
}
// Here is our autodetection
int format;<|fim_suffix|> std::string s;
if (data != nullptr) {
s = buf.c_str();
} else {
std::ifstream t(filename);
std::string u((std::istreambuf_iterator<char>(t)), std::istreambuf_iterator<char>());
s = u.c_str();
}
return ProcessPagesFileList(nullptr, &s, retry_config, timeout_millisec, renderer,
tesseract_->tessedit_page_number);
}
// Maybe we have a TIFF which is potentially multipage
bool tiff = (format == IFF_TIFF || format == IFF_TIFF_PACKBITS || format == IFF_TIFF_RLE ||
format == IFF_TIFF_G3 || format == IFF_TIFF_G4 || format == IFF_TIFF_LZW ||
#if LIBLEPT_MAJOR_VERSION > 1 || LIBLEPT_MINOR_VERSION > 76
format == IFF_TIFF_JPEG ||
#endif
format == IFF_TIFF_ZIP);
// Fail early if we can, before producing any output
Pix *pix = nullptr;
if (!tiff) {
pix = (data != nullptr) ? pixReadMem(data, buf.size()) : pixRead(filename);
if (pix == nullptr) {
return false;
}
}
// Begin the output
if (renderer && !renderer->BeginDocument(document_title.c_str())) {
pixDestroy(&pix);
return false;
}
// Produce output
r = (tiff) ? ProcessPagesMultipageTiff(data, buf.size(), filename, retry_config, timeout_millisec,
renderer, tesseract_->tessedit_page_number)
: ProcessPage(pix, 0, filename, retry_config, timeout_millisec, renderer);
// Clean up memory as needed
pixDestroy(&pix);
// End the output
if (!r || (renderer && !renderer->EndDocument())) {
return false;
}
return true;
}
bool TessBaseAPI::ProcessPage(Pix *pix, int page_index, const char *filename,
const char *retry_config, int timeout_millisec,
TessResultRenderer *renderer) {
SetInputName(filename);
SetImage(pix);
bool failed = false;
if (tesseract_->tessedit_pageseg_mode == PSM_AUTO_ONLY) {
// Disabled character recognition<|fim_middle|> int r =
(data != nullptr) ? findFileFormatBuffer(data, &format) : findFileFormat(filename, &format);
// Maybe we have a filelist
if (r != 0 || format == IFF_UNKNOWN) {
|
}
// Allow no more than 8 redirections to prevent endless loops.
curlcode = curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 8);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
int timeout = curl_timeout;
if (timeout > 0) {
curlcode = curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
}
std::string cookiefile = curl_cookiefile;
if (!cookiefile.empty()) {
curlcode = curl_easy_setopt(curl, CURLOPT_COOKIEFILE, cookiefile.c_str());
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
}
curlcode = curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buf);
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_setopt(curl, CURLOPT_USERAGENT, "Tesseract OCR");
if (curlcode != CURLE_OK) {
return error("curl_easy_setopt");
}
curlcode = curl_easy_perform(curl);
if (curlcode != CURLE_OK) {
return error("curl_easy_perform");
}
curl_easy_cleanup(curl);
data = reinterpret_cast<const l_uint8 *>(buf.data());
}
#else
fprintf(stderr, "Error, this tesseract has no URL support\n");
return false;
#endif
} else {
// Check whether the input file can be read.
if (FILE *file = fopen(filename, "rb")) {
fclose(file);
} else {
fprintf(stderr, "Error, cannot read input file %s: %s\n", filename, strerror(errno));
return false;
}
}
// Here is our autodetection
int format;
|
int r =
(data != nullptr) ? findFileFormatBuffer(data, &format) : findFileFormat(filename, &format);
// Maybe we have a filelist
if (r != 0 || format == IFF_UNKNOWN) {
|
std::string s;
if (data != nullptr) {
s = buf.c_str();
} else {
std::ifstream t(filename);
std::string u((std::istreambuf_iterator<char>(t)), std::istreambuf_iterator<char>());
s = u.c_str();
}
return ProcessPagesFileList(nullptr, &s, retry_config, timeout_millisec, renderer,
tesseract_->tessedit_page_number);
}
// Maybe we have a TIFF which is potentially multipage
bool tiff = (format == IFF_TIFF || format == IFF_TIFF_PACKBITS || format == IFF_TIFF_RLE ||
format == IFF_TIFF_G3 || format == IFF_TIFF_G4 || format == IFF_TIFF_LZW ||
#if LIBLEPT_MAJOR_VERSION > 1 || LIBLEPT_MINOR_VERSION > 76
format == IFF_TIFF_JPEG ||
#endif
format == IFF_TIFF_ZIP);
// Fail early if we can, before producing any output
Pix *pix = nullptr;
if (!tiff) {
pix = (data != nullptr) ? pixReadMem(data, buf.size()) : pixRead(filename);
if (pix == nullptr) {
return false;
}
}
// Begin the output
if (renderer && !renderer->BeginDocument(document_title.c_str())) {
pixDestroy(&pix);
return false;
}
// Produce output
r = (tiff) ? ProcessPagesMultipageTiff(data, buf.size(), filename, retry_config, timeout_millisec,
renderer, tesseract_->tessedit_page_number)
: ProcessPage(pix, 0, filename, retry_config, timeout_millisec, renderer);
// Clean up memory as needed
pixDestroy(&pix);
// End the output
if (!r || (renderer && !renderer->EndDocument())) {
return false;
}
return true;
}
bool TessBaseAPI::ProcessPage(Pix *pix, int page_index, const char *filename,
const char *retry_config, int timeout_millisec,
TessResultRenderer *renderer) {
SetInputName(filename);
SetImage(pix);
bool failed = false;
if (tesseract_->tessedit_pageseg_mode == PSM_AUTO_ONLY) {
// Disabled character recognition
|
random
|