e2d2-cnndm / config.json
{
  "T": 0,
  "architectures": [
    "E2D2"
  ],
  "attn_backend": "sdpa",
  "auto_map": {
    "AutoConfig": "diffusion.E2D2Config",
    "AutoModel": "diffusion.E2D2",
    "AutoModelForMaskedLM": "diffusion.E2D2"
  },
  "backbone_config": {
    "_target_": "backbone_encoder_decoder.LLMasEncoderDecoder",
    "attn_backend": "sdpa",
    "freeze_encoder": false,
    "hidden_size": 256,
    "intermediate_size": 768,
    "keep_top_decoder_layers": false,
    "keep_top_encoder_layers": false,
    "max_length": 1024,
    "num_decoder_layers": 8,
    "num_encoder_layers": 20,
    "pretrained_model_name_or_path": "Qwen/Qwen3-0.6B-Base",
    "reinit_decoder": true,
    "reinit_encoder": true,
    "tie_encoder_decoder_weights": false,
    "use_encoder_causal_mask": false,
    "use_gradient_checkpointing": false
  },
  "block_size": 8,
  "bos_token_id": 151643,
  "diffusion_type": "absorbing",
  "eos_token_id": 151643,
  "eval_block_size": 8,
  "keep_clean_bos": true,
  "length": 1024,
  "mask_token_id": 151660,
  "model_type": "e2d2",
  "noise_config": {
    "_target_": "noise_schedule_noise_schedules.LinearNoise"
  },
  "pad_token_id": 151643,
  "pad_vocab_size_multiple": 1,
  "shift_logits": false,
  "time_conditioned_backbone": false,
  "tokenization_config": {
    "bos_token_id": 151643,
    "eos_token_id": 151643,
    "mask_token_id": 151660,
    "pad_token_id": 151643,
    "pad_vocab_size_multiple": 1,
    "vocab_size": 151669
  },
  "tokenizer_name": "Qwen/Qwen3-0.6B-Base",
  "torch_dtype": "float32",
  "train_on_context": false,
  "transformers_version": "4.52.4",
  "vocab_size": 151669
}
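
Because "auto_map" routes AutoConfig, AutoModel, and AutoModelForMaskedLM to the custom diffusion.E2D2Config and diffusion.E2D2 classes bundled with the checkpoint, the model must be loaded with trust_remote_code=True. A minimal loading sketch, assuming the Hub repo id is yairschiff/e2d2-cnndm (the exact id is not stated in this file) and that the bundled classes handle everything else:

import torch
from transformers import AutoModel, AutoTokenizer

repo_id = "yairschiff/e2d2-cnndm"  # assumption: the actual Hub repo id may differ

# trust_remote_code=True is required because "auto_map" points the Auto classes
# at custom code (diffusion.E2D2Config / diffusion.E2D2) shipped with the repo.
model = AutoModel.from_pretrained(
    repo_id,
    trust_remote_code=True,
    torch_dtype=torch.float32,  # matches "torch_dtype": "float32" in the config
)

# "tokenizer_name" reuses the base Qwen3 tokenizer; token id 151660 from that
# vocabulary is designated as the mask token ("mask_token_id") for diffusion.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B-Base")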
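
The fields "diffusion_type": "absorbing", the LinearNoise schedule in "noise_config", "mask_token_id": 151660, and "block_size": 8 together describe a block-wise masked (absorbing-state) diffusion process. The snippet below is only an illustrative sketch of that forward noising step, not the repository's implementation: under a linear schedule, each token in a block is independently replaced by the mask token with probability t.

import torch

MASK_TOKEN_ID = 151660  # "mask_token_id" from the config above

def absorb_mask(input_ids: torch.LongTensor, t: float) -> torch.LongTensor:
    """Replace each token with MASK_TOKEN_ID with probability t (linear schedule)."""
    mask = torch.rand_like(input_ids, dtype=torch.float) < t
    return torch.where(mask, torch.full_like(input_ids, MASK_TOKEN_ID), input_ids)

# Example: noise one block of block_size=8 tokens at t=0.5.
block = torch.randint(0, 151669, (1, 8))  # 151669 = "vocab_size"
noisy_block = absorb_mask(block, t=0.5)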