{
  "_serialized_key": "NxDNeuronConfig",
  "async_mode": false,
  "attn_kernel_enabled": false,
  "batch_size": 1,
  "capacity_factor": null,
  "cc_pipeline_tiling_factor": 2,
  "checkpoint_id": "HuggingFaceTB/SmolLM2-135M-Instruct",
  "checkpoint_revision": "a91318be21aeaf0879874faa161dcb40c68847e9",
  "continuous_batching": false,
  "enable_bucketing": false,
  "ep_degree": 1,
  "flash_decoding_enabled": false,
  "fused_qkv": true,
  "glu_mlp": true,
  "is_chunked_prefill": false,
  "local_ranks_size": 12,
  "logical_nc_config": 1,
  "max_batch_size": 1,
  "max_context_length": 128,
  "max_topk": 256,
  "mlp_kernel_enabled": false,
  "mlp_kernel_fuse_residual_add": false,
  "n_active_tokens": 128,
  "neuronxcc_version": "2.19.8089.0+8ab9f450",
  "num_cores_per_group": 1,
  "on_device_sampling": true,
  "optimum_neuron_version": "0.3.0",
  "output_logits": false,
  "padding_side": "right",
  "pp_degree": 1,
  "qk_layernorm": false,
  "qkv_kernel_enabled": false,
  "rpl_reduce_dtype": "bfloat16",
  "sequence_length": 128,
  "sequence_parallel_enabled": false,
  "speculation_length": 0,
  "start_rank_id": 0,
  "target": null,
  "torch_dtype": "bfloat16",
  "tp_degree": 12,
  "vocab_parallel": false
}
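
This is the serialized NxDNeuronConfig that optimum-neuron 0.3.0 writes when a checkpoint is exported for AWS Neuron devices: SmolLM2-135M-Instruct compiled in bfloat16 with a static batch size of 1, a 128-token sequence length, and 12-way tensor parallelism (tp_degree). As a minimal sketch of how such an artifact is produced, assuming a Neuron-enabled host (for example an AWS inf2 instance) with optimum-neuron installed, an export with matching settings could look like the following; the prompt and max_new_tokens value are purely illustrative:

    # Sketch: re-export the checkpoint with settings that mirror this config.
    # Assumes a Neuron device and an installed optimum-neuron / neuronx toolchain.
    from optimum.neuron import NeuronModelForCausalLM
    from transformers import AutoTokenizer

    model = NeuronModelForCausalLM.from_pretrained(
        "HuggingFaceTB/SmolLM2-135M-Instruct",
        export=True,
        batch_size=1,           # batch_size / max_batch_size above
        sequence_length=128,    # sequence_length / max_context_length above
        num_cores=12,           # tp_degree above (12-way tensor parallelism)
        auto_cast_type="bf16",  # torch_dtype above
    )

    # Illustrative usage: compiled decoder models expose the usual generate() API.
    tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-135M-Instruct")
    inputs = tokenizer("Hello, how are you?", return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=64)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))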