{
  "model_type": "gpt_bigcode",
  "quantization": "q4f32_1",
  "model_config": {
    "n_embd": 2048,
    "n_inner": 8192,
    "n_head": 16,
    "n_layer": 24,
    "n_positions": 2048,
    "layer_norm_epsilon": 1e-05,
    "vocab_size": 49280,
    "context_window_size": 2048,
    "prefill_chunk_size": 2048,
    "tensor_parallel_shards": 1
  },
  "vocab_size": 49280,
  "context_window_size": 2048,
  "sliding_window_size": -1,
  "prefill_chunk_size": 2048,
  "attention_sink_size": -1,
  "tensor_parallel_shards": 1,
  "max_batch_size": 80,
  "mean_gen_len": 128,
  "max_gen_len": 512,
  "shift_fill_factor": 0.3,
  "temperature": 0.7,
  "repetition_penalty": 1.0,
  "top_p": 0.95,
  "conv_template": "LM",
  "pad_token_id": 0,
  "bos_token_id": 49152,
  "eos_token_id": 49152,
  "tokenizer_files": [
    "tokenizer.json",
    "tokenizer_config.json"
  ],
  "version": "0.1.0"
}
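
For reference, a minimal sketch of how a tool might load and sanity-check this config. The file name "mlc-chat-config.json" is an assumption for illustration; the consistency checks reflect the fact that several fields appear both at the top level and inside "model_config" above.

    import json

    # Load the chat config; the file name is an assumption for illustration.
    with open("mlc-chat-config.json", "r", encoding="utf-8") as f:
        cfg = json.load(f)

    model = cfg["model_config"]

    # In MLC LLM, "q4f32_1" denotes 4-bit weight quantization with
    # float32 compute.
    print(f"model={cfg['model_type']}, quantization={cfg['quantization']}")

    # Several fields are duplicated at the top level and inside
    # model_config; a loader may want to confirm they agree.
    for key in ("vocab_size", "context_window_size",
                "prefill_chunk_size", "tensor_parallel_shards"):
        assert cfg[key] == model[key], f"mismatched {key!r}"

    # Derived attention shape: per-head dimension.
    head_dim = model["n_embd"] // model["n_head"]  # 2048 // 16 = 128
    print(f"{model['n_layer']} layers, head_dim={head_dim}")

    # sliding_window_size == -1 disables sliding-window attention, so the
    # usable context is bounded by context_window_size (2048 tokens here).
    assert cfg["sliding_window_size"] == -1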