Commit ·
31271b3
0
Parent(s):
MiniMax-M2.7 REAP-172B-A10B AutoRound W4A16 — initial release
Browse files- .gitattributes +37 -0
- LICENSE +29 -0
- README.md +224 -0
- added_tokens.json +56 -0
- chat_template.jinja +159 -0
- config.json +374 -0
- configuration_minimax_m2.py +200 -0
- generation_config.json +8 -0
- merges.txt +0 -0
- model-00001-of-00023.safetensors +3 -0
- model-00002-of-00023.safetensors +3 -0
- model-00003-of-00023.safetensors +3 -0
- model-00004-of-00023.safetensors +3 -0
- model-00005-of-00023.safetensors +3 -0
- model-00006-of-00023.safetensors +3 -0
- model-00007-of-00023.safetensors +3 -0
- model-00008-of-00023.safetensors +3 -0
- model-00009-of-00023.safetensors +3 -0
- model-00010-of-00023.safetensors +3 -0
- model-00011-of-00023.safetensors +3 -0
- model-00012-of-00023.safetensors +3 -0
- model-00013-of-00023.safetensors +3 -0
- model-00014-of-00023.safetensors +3 -0
- model-00015-of-00023.safetensors +3 -0
- model-00016-of-00023.safetensors +3 -0
- model-00017-of-00023.safetensors +3 -0
- model-00018-of-00023.safetensors +3 -0
- model-00019-of-00023.safetensors +3 -0
- model-00020-of-00023.safetensors +3 -0
- model-00021-of-00023.safetensors +3 -0
- model-00022-of-00023.safetensors +3 -0
- model-00023-of-00023.safetensors +3 -0
- model.safetensors.index.json +3 -0
- modeling_minimax_m2.py +733 -0
- quantization_config.json +263 -0
- special_tokens_map.json +75 -0
- tokenizer.json +3 -0
- tokenizer_config.json +496 -0
- vocab.json +0 -0
.gitattributes
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
model.safetensors.index.json filter=lfs diff=lfs merge=lfs -text
|
LICENSE
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NON-COMMERCIAL LICENSE
|
| 2 |
+
|
| 3 |
+
Non-commercial use permitted based on MIT-style terms; commercial use requires prior written authorization.
|
| 4 |
+
|
| 5 |
+
Copyright (c) 2026 MiniMax
|
| 6 |
+
|
| 7 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software for non-commercial purposes, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or provide copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
| 8 |
+
|
| 9 |
+
1. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
| 10 |
+
|
| 11 |
+
2. If the Software (or any derivative works thereof) is used for any Commercial Use, you shall prominently display "Built with MiniMax M2.7" on a related website, user interface, blogpost, about page or product documentation.
|
| 12 |
+
|
| 13 |
+
3. Any Commercial Use of the Software or any derivative work thereof is prohibited without obtaining a separate, prior written authorization from MiniMax. To request such authorization, please contact api@minimax.io with the subject line "M2.7 licensing".
|
| 14 |
+
|
| 15 |
+
4. "Commercial Use" means any use of the Software or any derivative work thereof that is primarily intended for commercial advantage or monetary compensation, which includes, without limitation: (i) offering products or services to third parties for a fee, which utilize, incorporate, or rely on the Software or its derivatives, (ii) the commercial use of APIs provided by or for the Software or its derivatives, including to support or enable commercial products, services, or operations, whether in a cloud-based, hosted, or other similar environment, and (iii) the deployment or provision of the Software or its derivatives that have been subjected to post-training, fine-tuning, instruction-tuning, or any other form of modification, for any commercial purpose.
|
| 16 |
+
|
| 17 |
+
5. Permitted Free Uses. The following uses are expressly permitted free of charge: (a) personal use, including self-hosted deployment for coding, development of applications, agents, tools, integrations, research, experimentation, or other personal purposes; (b) use by non-profit organizations, academic institutions, and researchers for non-commercial research or educational purposes; (c) modification of the Software solely for the uses described in (a) or (b) above.
|
| 18 |
+
|
| 19 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
| 20 |
+
|
| 21 |
+
## Appendix: Prohibited Uses
|
| 22 |
+
|
| 23 |
+
You agree you will not use, or allow others to use, the Software or any derivatives of the Software to:
|
| 24 |
+
|
| 25 |
+
1. Generate or disseminate content prohibited by applicable laws or regulations.
|
| 26 |
+
2. Assist with, engage in or otherwise support any military purpose.
|
| 27 |
+
3. Exploit, harm, or attempt to exploit or harm minors.
|
| 28 |
+
4. Generate or disseminate false or misleading information with the intent to cause harm.
|
| 29 |
+
5. Promote discrimination, hate speech, or harmful behavior against individuals or groups based on race or ethnic origin, religion, disability, age, nationality and national origin, veteran status, sexual orientation, gender or gender identity, caste, immigration status, or any other characteristic that is associated with systemic discrimination or marginalization.
|
README.md
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
base_model: MiniMaxAI/MiniMax-M2.7
|
| 3 |
+
base_model_relation: quantized
|
| 4 |
+
library_name: transformers
|
| 5 |
+
pipeline_tag: text-generation
|
| 6 |
+
license: other
|
| 7 |
+
license_name: minimax-m2.7-license
|
| 8 |
+
license_link: LICENSE
|
| 9 |
+
language:
|
| 10 |
+
- en
|
| 11 |
+
tags:
|
| 12 |
+
- moe
|
| 13 |
+
- mixture-of-experts
|
| 14 |
+
- quantization
|
| 15 |
+
- auto-round
|
| 16 |
+
- autoround
|
| 17 |
+
- w4a16
|
| 18 |
+
- int4
|
| 19 |
+
- gptq
|
| 20 |
+
- reap
|
| 21 |
+
- pruned
|
| 22 |
+
- minimax
|
| 23 |
+
- minimax-m2
|
| 24 |
+
- blackwell
|
| 25 |
+
- dgx-spark
|
| 26 |
+
- vllm
|
| 27 |
+
datasets:
|
| 28 |
+
- theblackcat102/evol-codealpaca-v1
|
| 29 |
+
---
|
| 30 |
+
|
| 31 |
+
# MiniMax-M2.7 REAP-172B-A10B AutoRound W4A16
|
| 32 |
+
|
| 33 |
+
> **⚠ Experimental proof-of-concept — quality caveat**
|
| 34 |
+
>
|
| 35 |
+
> This checkpoint was produced as a **test of the end-to-end REAP → AutoRound → vLLM pipeline on a single NVIDIA GB10 / DGX Spark**. Because the calibration machine had limited memory and wall-clock budget, the REAP expert-saliency pass was run with only **64 calibration sequences** from `theblackcat102/evol-codealpaca-v1` at sequence length 512. Downstream quality is still being evaluated.
|
| 36 |
+
|
| 37 |
+
A **REAP-pruned** and **AutoRound W4A16-quantized** variant of MiniMax-M2.7.
|
| 38 |
+
The original 256-expert-per-layer MoE has been reduced to **192 experts per
|
| 39 |
+
layer (25 % compression)** using **REAP expert saliency pruning**, then
|
| 40 |
+
quantized to **4-bit weights / 16-bit activations** with Intel AutoRound.
|
| 41 |
+
The result is an **~86 GiB checkpoint** that runs comfortably on a single
|
| 42 |
+
NVIDIA **GB10 / DGX Spark** (128 GiB unified memory), as well as on any
|
| 43 |
+
CUDA GPU with vLLM's Marlin / GPTQ-Marlin W4A16 kernel.
|
| 44 |
+
|
| 45 |
+
- **Base model**: [`MiniMaxAI/MiniMax-M2`](https://huggingface.co/MiniMaxAI/MiniMax-M2) family (MiniMaxM2 architecture)
|
| 46 |
+
- **Pruning**: REAP (Cerebras) — 25 % compression → 192/256 experts per layer
|
| 47 |
+
- **Calibration**: `theblackcat102/evol-codealpaca-v1`, 64 samples, seed 42
|
| 48 |
+
- **Quantization**: AutoRound 0.12.2, W4A16, group size 128, symmetric,
|
| 49 |
+
GPTQ packing format. MoE router gates, embeddings, layer norms, and
|
| 50 |
+
`lm_head` kept at bf16/fp16.
|
| 51 |
+
- **Architecture**: 62 transformer layers, 192 experts/layer, top-8 routing,
|
| 52 |
+
hidden size 3072, 48 attention heads, 8 KV heads
|
| 53 |
+
- **Total parameters**: ~172 B (A10B — ~10 B activated per token)
|
| 54 |
+
- **Disk size**: ~86 GiB (23 safetensors shards)
|
| 55 |
+
|
| 56 |
+
## Quick start
|
| 57 |
+
|
| 58 |
+
### vLLM (recommended)
|
| 59 |
+
|
| 60 |
+
vLLM uses its own built-in `minimax_m2` model implementation with
|
| 61 |
+
**FlashInfer attention** and the **GPTQ-Marlin W4A16 kernel**, so this
|
| 62 |
+
checkpoint runs out of the box. Tested on **vLLM 0.17.1.dev0** (container:
|
| 63 |
+
[`scitrera/dgx-spark-vllm:0.17.0-t5`](https://hub.docker.com/r/scitrera/dgx-spark-vllm/tags),
|
| 64 |
+
vLLM 0.17.1.dev0 + transformers 5.3.0 on Blackwell arm64).
|
| 65 |
+
|
| 66 |
+
```bash
|
| 67 |
+
vllm serve MJPansa/MiniMax-M2.7-REAP-172B-A10B-AutoRound-W4A16 \
|
| 68 |
+
--host 0.0.0.0 --port 8000 \
|
| 69 |
+
--trust-remote-code \
|
| 70 |
+
--max-model-len 32768 \
|
| 71 |
+
--gpu-memory-utilization 0.80 \
|
| 72 |
+
--kv-cache-dtype fp8 \
|
| 73 |
+
--load-format fastsafetensors \
|
| 74 |
+
--enable-auto-tool-choice \
|
| 75 |
+
--tool-call-parser minimax_m2 \
|
| 76 |
+
--reasoning-parser minimax_m2_append_think
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
Example request:
|
| 80 |
+
|
| 81 |
+
```bash
|
| 82 |
+
curl -sS http://localhost:8000/v1/chat/completions \
|
| 83 |
+
-H 'Content-Type: application/json' \
|
| 84 |
+
-d '{
|
| 85 |
+
"model": "MJPansa/MiniMax-M2.7-REAP-172B-A10B-AutoRound-W4A16",
|
| 86 |
+
"messages": [
|
| 87 |
+
{"role": "user", "content": "Write a Python function that returns the two integers in a list whose sum is closest to zero."}
|
| 88 |
+
],
|
| 89 |
+
"temperature": 0,
|
| 90 |
+
"max_tokens": 512
|
| 91 |
+
}' | jq '.choices[0].message.content'
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
**DGX Spark / GB10 note**: if you serve on a single 128 GiB unified-memory
|
| 95 |
+
node, leave at least ~20 GiB to the host OS + display / other services
|
| 96 |
+
before starting vLLM, otherwise the OOM killer may reclaim the engine
|
| 97 |
+
process during CUDA graph capture. The flags above were validated in that
|
| 98 |
+
configuration.
|
| 99 |
+
|
| 100 |
+
### HuggingFace Transformers
|
| 101 |
+
|
| 102 |
+
```python
|
| 103 |
+
import torch
|
| 104 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 105 |
+
|
| 106 |
+
repo = "MJPansa/MiniMax-M2.7-REAP-172B-A10B-AutoRound-W4A16"
|
| 107 |
+
tok = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
|
| 108 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 109 |
+
repo,
|
| 110 |
+
trust_remote_code=True,
|
| 111 |
+
torch_dtype=torch.bfloat16,
|
| 112 |
+
device_map={"": "cuda:0"},
|
| 113 |
+
)
|
| 114 |
+
model.config.use_cache = False # pure-HF path needs this; vLLM is unaffected
|
| 115 |
+
|
| 116 |
+
prompt = "The capital of France is"
|
| 117 |
+
inputs = tok(prompt, return_tensors="pt").to("cuda:0")
|
| 118 |
+
with torch.no_grad():
|
| 119 |
+
out = model.generate(**inputs, max_new_tokens=64, do_sample=False, use_cache=False)
|
| 120 |
+
print(tok.decode(out[0], skip_special_tokens=True))
|
| 121 |
+
```
|
| 122 |
+
|
| 123 |
+
`trust_remote_code=True` is required because the checkpoint ships its own
|
| 124 |
+
`modeling_minimax_m2.py` that uses the per-expert `w1/w2/w3` layout. The
|
| 125 |
+
bundled modeling file includes a small compat shim so it works on **both
|
| 126 |
+
transformers 4.55+ and transformers 5.x**.
|
| 127 |
+
|
| 128 |
+
## Benchmarks (llama-benchy, vLLM 0.17.1 on GB10)
|
| 129 |
+
|
| 130 |
+
Measured with `uvx llama-benchy --latency-mode generation --skip-coherence`
|
| 131 |
+
against the vLLM server running with the launch flags above
|
| 132 |
+
(`--kv-cache-dtype fp8`, `--max-model-len 32768`). Single request, batch
|
| 133 |
+
size 1, prefill length 2048 tokens at each reported depth, 32 decode
|
| 134 |
+
tokens. Values are mean ± stddev over 3 runs.
|
| 135 |
+
|
| 136 |
+
| depth | prefill tok/s | decode tok/s | TTFT (ms) |
|
| 137 |
+
|------:|--------------:|-------------:|----------:|
|
| 138 |
+
| 0 | 2469.3 ± 13.3 | 29.28 ± 0.05 | 864.5 |
|
| 139 |
+
| 4096 | 2089.9 ± 12.5 | 27.73 ± 0.05 | 2784.8 |
|
| 140 |
+
| 8192 | 1890.3 ± 5.2 | 26.28 ± 0.05 | 5062.3 |
|
| 141 |
+
| 16384 | 1601.1 ± 6.5 | 23.88 ± 0.05 | 10647.7 |
|
| 142 |
+
|
| 143 |
+
Decode throughput holds within ~18 % across a 16 K-token prefix — the
|
| 144 |
+
pruned MoE routing is stable under longer context on this quantization
|
| 145 |
+
format.
|
| 146 |
+
|
| 147 |
+
## Pruning methodology (REAP)
|
| 148 |
+
|
| 149 |
+
Starting from the full 256-experts-per-layer MoE, we ran REAP over 62
|
| 150 |
+
layers using 64 calibration sequences from
|
| 151 |
+
[`theblackcat102/evol-codealpaca-v1`](https://huggingface.co/datasets/theblackcat102/evol-codealpaca-v1)
|
| 152 |
+
(seed 42, max sequence length 512) to collect per-expert activation
|
| 153 |
+
saliency, then dropped the lowest-saliency 25 % of experts per layer
|
| 154 |
+
(64 per layer × 62 layers = 3 968 experts removed), leaving **192
|
| 155 |
+
experts per layer**. Router gates were re-projected to the reduced expert
|
| 156 |
+
index space. No further fine-tuning was applied.
|
| 157 |
+
|
| 158 |
+
REAP reference: see [cerebras/MiniMax-M2-REAP-172B-A10B](https://huggingface.co/cerebras/MiniMax-M2-REAP-172B-A10B)
|
| 159 |
+
and the REAP paper from Cerebras Research.
|
| 160 |
+
|
| 161 |
+
## Quantization methodology (AutoRound)
|
| 162 |
+
|
| 163 |
+
The pruned bf16 model was quantized to W4A16 with **Intel AutoRound
|
| 164 |
+
0.12.2** in OPT-RTN mode (`iters=0`, no AdaRound search), `group_size=128`,
|
| 165 |
+
symmetric, GPTQ packing format. The MoE **router gates for all 62 layers**,
|
| 166 |
+
embeddings, norms, and `lm_head` were kept at fp16/bf16 via the
|
| 167 |
+
`extra_config` exemption list, so only FFN expert projections and
|
| 168 |
+
attention q/k/v/o projections are int4. Under vLLM this dispatches to the
|
| 169 |
+
`GPTQMarlinLinearMethod` + `MarlinLinearKernel` path automatically.
|
| 170 |
+
|
| 171 |
+
## Files in this repo
|
| 172 |
+
|
| 173 |
+
```
|
| 174 |
+
config.json # model config (backend=auto for vLLM)
|
| 175 |
+
generation_config.json
|
| 176 |
+
quantization_config.json # AutoRound W4A16 sidecar
|
| 177 |
+
configuration_minimax_m2.py # custom config class
|
| 178 |
+
modeling_minimax_m2.py # modeling file with tf5 ROPE compat shim
|
| 179 |
+
tokenizer.json
|
| 180 |
+
tokenizer_config.json
|
| 181 |
+
special_tokens_map.json
|
| 182 |
+
added_tokens.json
|
| 183 |
+
vocab.json
|
| 184 |
+
merges.txt
|
| 185 |
+
chat_template.jinja
|
| 186 |
+
model.safetensors.index.json
|
| 187 |
+
model-0000{1..23}-of-00023.safetensors
|
| 188 |
+
```
|
| 189 |
+
|
| 190 |
+
## License
|
| 191 |
+
|
| 192 |
+
This derivative inherits the **MiniMax-M2.7 Non-Commercial License** from
|
| 193 |
+
the upstream [`MiniMaxAI/MiniMax-M2.7`](https://huggingface.co/MiniMaxAI/MiniMax-M2.7).
|
| 194 |
+
See [`LICENSE`](LICENSE) for the full text. Key points:
|
| 195 |
+
|
| 196 |
+
- **Non-commercial use is free** for personal, research, academic, and
|
| 197 |
+
non-profit purposes, including self-hosted deployment, experimentation,
|
| 198 |
+
and educational use.
|
| 199 |
+
- **Commercial use requires prior written authorization** from MiniMax —
|
| 200 |
+
contact `api@minimax.io` with the subject line "M2.7 licensing".
|
| 201 |
+
- **Derivative works** (including REAP-pruned and quantized variants such
|
| 202 |
+
as this one) are covered by the same non-commercial terms.
|
| 203 |
+
- If you obtain commercial authorization and deploy this model, you must
|
| 204 |
+
prominently display **"Built with MiniMax M2.7"** on the related website,
|
| 205 |
+
user interface, blog post, about page, or product documentation.
|
| 206 |
+
- Prohibited uses include: illegal content, military applications, harming
|
| 207 |
+
minors, generating harmful misinformation, and promoting discrimination
|
| 208 |
+
or hate speech. See the `Appendix: Prohibited Uses` section of `LICENSE`
|
| 209 |
+
for the full list.
|
| 210 |
+
|
| 211 |
+
## Credits
|
| 212 |
+
|
| 213 |
+
- **MiniMaxAI** for the MiniMax-M2 base model and architecture
|
| 214 |
+
- **Cerebras Research** for the REAP expert pruning methodology
|
| 215 |
+
- **Intel Neural Compressor team** for AutoRound
|
| 216 |
+
- **[scitrera/dgx-spark-vllm](https://hub.docker.com/r/scitrera/dgx-spark-vllm/tags)**
|
| 217 |
+
for the prebuilt vLLM arm64 container that made serving this model on
|
| 218 |
+
GB10 trivially reproducible
|
| 219 |
+
- **vLLM project** for the Marlin W4A16 and FlashInfer attention kernels
|
| 220 |
+
|
| 221 |
+
## Citation
|
| 222 |
+
|
| 223 |
+
If you use this model, please cite the base MiniMax-M2 release, the REAP
|
| 224 |
+
paper, and AutoRound.
|
added_tokens.json
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"</minimax:tool_call>": 200053,
|
| 3 |
+
"</think>": 200051,
|
| 4 |
+
"<add_file>": 200036,
|
| 5 |
+
"<code_context>": 200043,
|
| 6 |
+
"<code_interpreter>": 200023,
|
| 7 |
+
"<commit_after>": 200018,
|
| 8 |
+
"<commit_before>": 200016,
|
| 9 |
+
"<commit_message>": 200040,
|
| 10 |
+
"<commit_msg>": 200017,
|
| 11 |
+
"<delete_file>": 200037,
|
| 12 |
+
"<edit_file>": 200039,
|
| 13 |
+
"<empty_output>": 200015,
|
| 14 |
+
"<empty_source_file>": 200041,
|
| 15 |
+
"<file_content>": 200044,
|
| 16 |
+
"<file_sep>": 200049,
|
| 17 |
+
"<filename>": 200006,
|
| 18 |
+
"<filepath>": 200048,
|
| 19 |
+
"<fim_middle>": 200002,
|
| 20 |
+
"<fim_pad>": 200004,
|
| 21 |
+
"<fim_prefix>": 200001,
|
| 22 |
+
"<fim_suffix>": 200003,
|
| 23 |
+
"<function_call>": 200022,
|
| 24 |
+
"<gh_stars>": 200007,
|
| 25 |
+
"<issue_closed>": 200010,
|
| 26 |
+
"<issue_comment>": 200009,
|
| 27 |
+
"<issue_start>": 200008,
|
| 28 |
+
"<jupyter_code>": 200013,
|
| 29 |
+
"<jupyter_error>": 200035,
|
| 30 |
+
"<jupyter_output>": 200014,
|
| 31 |
+
"<jupyter_start>": 200011,
|
| 32 |
+
"<jupyter_text>": 200012,
|
| 33 |
+
"<minimax:tool_call>": 200052,
|
| 34 |
+
"<pr_start>": 200046,
|
| 35 |
+
"<rename_file>": 200038,
|
| 36 |
+
"<repo_struct>": 200042,
|
| 37 |
+
"<reponame>": 200005,
|
| 38 |
+
"<review_comment>": 200047,
|
| 39 |
+
"<source_files>": 200045,
|
| 40 |
+
"<think>": 200050,
|
| 41 |
+
"[e~[": 200020,
|
| 42 |
+
"]!d~[": 200021,
|
| 43 |
+
"]!p~[": 200000,
|
| 44 |
+
"]<]end of image[>[": 200030,
|
| 45 |
+
"]<]end of speech[>[": 200028,
|
| 46 |
+
"]<]end of video[>[": 200032,
|
| 47 |
+
"]<]image[>[": 200025,
|
| 48 |
+
"]<]speech[>[": 200024,
|
| 49 |
+
"]<]start of image[>[": 200029,
|
| 50 |
+
"]<]start of speech[>[": 200027,
|
| 51 |
+
"]<]start of video[>[": 200031,
|
| 52 |
+
"]<]video[>[": 200026,
|
| 53 |
+
"]<]vision pad[>[": 200033,
|
| 54 |
+
"]~!b[": 200034,
|
| 55 |
+
"]~b]": 200019
|
| 56 |
+
}
|
chat_template.jinja
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{# ----------‑‑‑ special token variables ‑‑‑---------- #}
|
| 2 |
+
{%- set toolcall_begin_token = '<minimax:tool_call>' -%}
|
| 3 |
+
{%- set toolcall_end_token = '</minimax:tool_call>' -%}
|
| 4 |
+
{#- Tool Rendering Functions ============================================== -#}
|
| 5 |
+
{%- macro render_tool_namespace(namespace_name, tool_list) -%}
|
| 6 |
+
{%- for tool in tool_list -%}
|
| 7 |
+
<tool>{{ tool.function | tojson(ensure_ascii=False) }}</tool>
|
| 8 |
+
{% endfor -%}
|
| 9 |
+
{%- endmacro -%}
|
| 10 |
+
{%- macro visible_text(content) -%}
|
| 11 |
+
{%- if content is string -%}
|
| 12 |
+
{{ content }}
|
| 13 |
+
{%- elif content is iterable and content is not mapping -%}
|
| 14 |
+
{%- for item in content -%}
|
| 15 |
+
{%- if item is mapping and item.type == 'text' -%}
|
| 16 |
+
{{- item.text }}
|
| 17 |
+
{%- elif item is string -%}
|
| 18 |
+
{{- item }}
|
| 19 |
+
{%- endif -%}
|
| 20 |
+
{%- endfor -%}
|
| 21 |
+
{%- else -%}
|
| 22 |
+
{{- content }}
|
| 23 |
+
{%- endif -%}
|
| 24 |
+
{%- endmacro -%}
|
| 25 |
+
{#- System Message Construction ============================================ -#}
|
| 26 |
+
{%- macro build_system_message(system_message) -%}
|
| 27 |
+
{%- if system_message and system_message.content -%}
|
| 28 |
+
{{- visible_text(system_message.content) }}
|
| 29 |
+
{%- else -%}
|
| 30 |
+
{%- if model_identity is not defined -%}
|
| 31 |
+
{%- set model_identity = "You are a helpful assistant. Your name is MiniMax-M2.7 and is built by MiniMax." -%}
|
| 32 |
+
{%- endif -%}
|
| 33 |
+
{{- model_identity }}
|
| 34 |
+
{%- endif -%}
|
| 35 |
+
|
| 36 |
+
{#- Handle current_date -#}
|
| 37 |
+
{%- if system_message and system_message.current_date -%}
|
| 38 |
+
{{- '\n' ~ 'Current date: ' + system_message.current_date }}
|
| 39 |
+
{%- endif -%}
|
| 40 |
+
{#- Handle current_location -#}
|
| 41 |
+
{%- if system_message and system_message.current_location -%}
|
| 42 |
+
{{- '\n' ~ 'Current location: ' + system_message.current_location }}
|
| 43 |
+
{%- endif -%}
|
| 44 |
+
{%- endmacro -%}
|
| 45 |
+
{#- Main Template Logic ================================================= -#}
|
| 46 |
+
{#- Extract system message (only first message if it's system) -#}
|
| 47 |
+
{%- set system_message = none -%}
|
| 48 |
+
{%- set conversation_messages = messages -%}
|
| 49 |
+
{%- if messages and messages[0].role == "system" -%}
|
| 50 |
+
{%- set system_message = messages[0] -%}
|
| 51 |
+
{%- set conversation_messages = messages[1:] -%}
|
| 52 |
+
{%- endif -%}
|
| 53 |
+
{#- Get the last user message turn, for interleaved thinking -#}
|
| 54 |
+
{%- set ns = namespace(last_user_index=-1) %}
|
| 55 |
+
{% for m in conversation_messages %}
|
| 56 |
+
{%- if m.role == 'user' %}
|
| 57 |
+
{% set ns.last_user_index = loop.index0 -%}
|
| 58 |
+
{%- endif %}
|
| 59 |
+
{%- endfor %}
|
| 60 |
+
{#- Render system message -#}
|
| 61 |
+
{{- ']~!b[' ~ ']~b]system' ~ '\n' }}
|
| 62 |
+
{{- build_system_message(system_message) }}
|
| 63 |
+
{#- Render tools if available -#}
|
| 64 |
+
{%- if tools -%}
|
| 65 |
+
{{- '\n\n' ~ '# Tools' ~ '\n' ~ 'You may call one or more tools to assist with the user query.\nHere are the tools available in JSONSchema format:' ~ '\n' }}
|
| 66 |
+
{{- '\n' ~ '<tools>' ~ '\n' }}
|
| 67 |
+
{{- render_tool_namespace("functions", tools) }}
|
| 68 |
+
{{- '</tools>' ~ '\n\n' }}
|
| 69 |
+
{{- 'When making tool calls, use XML format to invoke tools and pass parameters:' ~ '\n' }}
|
| 70 |
+
{{- '\n' ~ toolcall_begin_token }}
|
| 71 |
+
<invoke name="tool-name-1">
|
| 72 |
+
<parameter name="param-key-1">param-value-1</parameter>
|
| 73 |
+
<parameter name="param-key-2">param-value-2</parameter>
|
| 74 |
+
...
|
| 75 |
+
</invoke>
|
| 76 |
+
{{- '\n' ~ toolcall_end_token }}
|
| 77 |
+
{%- endif -%}
|
| 78 |
+
{{- '[e~[\n' }}
|
| 79 |
+
|
| 80 |
+
{#- Render messages -#}
|
| 81 |
+
{%- set last_tool_call = namespace(name=none) -%}
|
| 82 |
+
{%- for message in conversation_messages -%}
|
| 83 |
+
{%- if message.role == 'assistant' -%}
|
| 84 |
+
{#- Only render reasoning_content if no user message follows -#}
|
| 85 |
+
{{- ']~b]ai' ~ '\n' }}
|
| 86 |
+
|
| 87 |
+
{%- set reasoning_content = '' %}
|
| 88 |
+
{%- set content = visible_text(message.content) %}
|
| 89 |
+
{%- if message.reasoning_content is string %}
|
| 90 |
+
{%- set reasoning_content = message.reasoning_content %}
|
| 91 |
+
{%- else %}
|
| 92 |
+
{%- if '</think>' in content %}
|
| 93 |
+
{%- set reasoning_content = content.split('</think>')[0].strip('\n').split('<think>')[-1].strip('\n') %}
|
| 94 |
+
{%- set content = content.split('</think>')[-1].strip('\n') %}
|
| 95 |
+
{%- endif %}
|
| 96 |
+
{%- endif %}
|
| 97 |
+
{%- if reasoning_content and loop.index0 > ns.last_user_index -%}
|
| 98 |
+
{{- '<think>' ~ '\n' ~ reasoning_content ~ '\n' ~ '</think>' ~ '\n\n' }}
|
| 99 |
+
{%- endif -%}
|
| 100 |
+
{%- if content -%}
|
| 101 |
+
{{- content }}
|
| 102 |
+
{%- endif -%}
|
| 103 |
+
{%- if message.tool_calls -%}
|
| 104 |
+
{{- '\n' ~ toolcall_begin_token ~ '\n' }}
|
| 105 |
+
|
| 106 |
+
{%- for tool_call in message.tool_calls -%}
|
| 107 |
+
{%- if tool_call.function %}
|
| 108 |
+
{%- set tool_call = tool_call.function %}
|
| 109 |
+
{%- endif %}
|
| 110 |
+
{{- '<invoke name="' + tool_call.name + '">' }}
|
| 111 |
+
{% set _args = tool_call.arguments %}
|
| 112 |
+
{%- for k, v in _args.items() %}
|
| 113 |
+
{{- '<parameter name="' + k + '">' }}
|
| 114 |
+
{{- v | tojson(ensure_ascii=False) if v is not string else v }}
|
| 115 |
+
{{- '</parameter>' }}
|
| 116 |
+
{% endfor %}
|
| 117 |
+
{{- '</invoke>' ~ '\n' }}
|
| 118 |
+
{%- endfor -%}
|
| 119 |
+
|
| 120 |
+
{{- toolcall_end_token}}
|
| 121 |
+
{%- set last_tool_call.name = message.tool_calls[-1].name -%}
|
| 122 |
+
{%- else -%}
|
| 123 |
+
{%- set last_tool_call.name = none -%}
|
| 124 |
+
{%- endif -%}
|
| 125 |
+
{{- '[e~[' ~ '\n' }}
|
| 126 |
+
|
| 127 |
+
{%- elif message.role == 'tool' -%}
|
| 128 |
+
{%- if last_tool_call.name is none -%}
|
| 129 |
+
{{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
|
| 130 |
+
{%- endif -%}
|
| 131 |
+
{%- if loop.first or (conversation_messages[loop.index0 - 1].role != 'tool') -%}
|
| 132 |
+
{{- ']~b]tool' }}
|
| 133 |
+
{%- endif -%}
|
| 134 |
+
{%- if message.content is string -%}
|
| 135 |
+
{{- '\n<response>' }}
|
| 136 |
+
{{- message.content }}
|
| 137 |
+
{{- '</response>' }}
|
| 138 |
+
{%- else -%}
|
| 139 |
+
{%- for tr in message.content -%}
|
| 140 |
+
{{- '\n<response>' }}
|
| 141 |
+
{{- tr.output if tr.output is defined else (tr.text if tr.type == 'text' and tr.text is defined else tr) }}
|
| 142 |
+
{{- '\n</response>' }}
|
| 143 |
+
{%- endfor -%}
|
| 144 |
+
{%- endif -%}
|
| 145 |
+
{%- if loop.last or (conversation_messages[loop.index0 + 1].role != 'tool') -%}
|
| 146 |
+
{{- '[e~[\n' -}}
|
| 147 |
+
{%- endif -%}
|
| 148 |
+
|
| 149 |
+
{%- elif message.role == 'user' -%}
|
| 150 |
+
{{- ']~b]user' ~ '\n' }}
|
| 151 |
+
{{- visible_text(message.content) }}
|
| 152 |
+
{{- '[e~[' ~ '\n' }}
|
| 153 |
+
{%- endif -%}
|
| 154 |
+
{%- endfor -%}
|
| 155 |
+
|
| 156 |
+
{#- Generation prompt -#}
|
| 157 |
+
{%- if add_generation_prompt -%}
|
| 158 |
+
{{- ']~b]ai' ~ '\n' ~ '<think>' ~ '\n' }}
|
| 159 |
+
{%- endif -%}
|
config.json
ADDED
|
@@ -0,0 +1,374 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"MiniMaxM2ForCausalLM"
|
| 4 |
+
],
|
| 5 |
+
"attention_dropout": 0.0,
|
| 6 |
+
"attn_type_list": [
|
| 7 |
+
1,
|
| 8 |
+
1,
|
| 9 |
+
1,
|
| 10 |
+
1,
|
| 11 |
+
1,
|
| 12 |
+
1,
|
| 13 |
+
1,
|
| 14 |
+
1,
|
| 15 |
+
1,
|
| 16 |
+
1,
|
| 17 |
+
1,
|
| 18 |
+
1,
|
| 19 |
+
1,
|
| 20 |
+
1,
|
| 21 |
+
1,
|
| 22 |
+
1,
|
| 23 |
+
1,
|
| 24 |
+
1,
|
| 25 |
+
1,
|
| 26 |
+
1,
|
| 27 |
+
1,
|
| 28 |
+
1,
|
| 29 |
+
1,
|
| 30 |
+
1,
|
| 31 |
+
1,
|
| 32 |
+
1,
|
| 33 |
+
1,
|
| 34 |
+
1,
|
| 35 |
+
1,
|
| 36 |
+
1,
|
| 37 |
+
1,
|
| 38 |
+
1,
|
| 39 |
+
1,
|
| 40 |
+
1,
|
| 41 |
+
1,
|
| 42 |
+
1,
|
| 43 |
+
1,
|
| 44 |
+
1,
|
| 45 |
+
1,
|
| 46 |
+
1,
|
| 47 |
+
1,
|
| 48 |
+
1,
|
| 49 |
+
1,
|
| 50 |
+
1,
|
| 51 |
+
1,
|
| 52 |
+
1,
|
| 53 |
+
1,
|
| 54 |
+
1,
|
| 55 |
+
1,
|
| 56 |
+
1,
|
| 57 |
+
1,
|
| 58 |
+
1,
|
| 59 |
+
1,
|
| 60 |
+
1,
|
| 61 |
+
1,
|
| 62 |
+
1,
|
| 63 |
+
1,
|
| 64 |
+
1,
|
| 65 |
+
1,
|
| 66 |
+
1,
|
| 67 |
+
1,
|
| 68 |
+
1
|
| 69 |
+
],
|
| 70 |
+
"auto_map": {
|
| 71 |
+
"AutoConfig": "configuration_minimax_m2.MiniMaxM2Config",
|
| 72 |
+
"AutoModelForCausalLM": "modeling_minimax_m2.MiniMaxM2ForCausalLM"
|
| 73 |
+
},
|
| 74 |
+
"bos_token_id": 1,
|
| 75 |
+
"dtype": "bfloat16",
|
| 76 |
+
"eos_token_id": 2,
|
| 77 |
+
"head_dim": 128,
|
| 78 |
+
"hidden_act": "silu",
|
| 79 |
+
"hidden_size": 3072,
|
| 80 |
+
"initializer_range": 0.02,
|
| 81 |
+
"intermediate_size": 1536,
|
| 82 |
+
"max_position_embeddings": 196608,
|
| 83 |
+
"model_type": "minimax_m2",
|
| 84 |
+
"mtp_transformer_layers": 1,
|
| 85 |
+
"num_attention_heads": 48,
|
| 86 |
+
"num_experts_per_tok": 8,
|
| 87 |
+
"num_hidden_layers": 62,
|
| 88 |
+
"num_key_value_heads": 8,
|
| 89 |
+
"num_local_experts": 192,
|
| 90 |
+
"num_mtp_modules": 3,
|
| 91 |
+
"output_router_logits": false,
|
| 92 |
+
"partial_rotary_factor": 0.5,
|
| 93 |
+
"qk_norm_type": "per_layer",
|
| 94 |
+
"quantization_config": {
|
| 95 |
+
"autoround_version": "0.12.2",
|
| 96 |
+
"batch_size": 1,
|
| 97 |
+
"bits": 4,
|
| 98 |
+
"data_type": "int",
|
| 99 |
+
"extra_config": {
|
| 100 |
+
"model.layers.0.block_sparse_moe.gate": {
|
| 101 |
+
"bits": 16,
|
| 102 |
+
"data_type": "float"
|
| 103 |
+
},
|
| 104 |
+
"model.layers.1.block_sparse_moe.gate": {
|
| 105 |
+
"bits": 16,
|
| 106 |
+
"data_type": "float"
|
| 107 |
+
},
|
| 108 |
+
"model.layers.10.block_sparse_moe.gate": {
|
| 109 |
+
"bits": 16,
|
| 110 |
+
"data_type": "float"
|
| 111 |
+
},
|
| 112 |
+
"model.layers.11.block_sparse_moe.gate": {
|
| 113 |
+
"bits": 16,
|
| 114 |
+
"data_type": "float"
|
| 115 |
+
},
|
| 116 |
+
"model.layers.12.block_sparse_moe.gate": {
|
| 117 |
+
"bits": 16,
|
| 118 |
+
"data_type": "float"
|
| 119 |
+
},
|
| 120 |
+
"model.layers.13.block_sparse_moe.gate": {
|
| 121 |
+
"bits": 16,
|
| 122 |
+
"data_type": "float"
|
| 123 |
+
},
|
| 124 |
+
"model.layers.14.block_sparse_moe.gate": {
|
| 125 |
+
"bits": 16,
|
| 126 |
+
"data_type": "float"
|
| 127 |
+
},
|
| 128 |
+
"model.layers.15.block_sparse_moe.gate": {
|
| 129 |
+
"bits": 16,
|
| 130 |
+
"data_type": "float"
|
| 131 |
+
},
|
| 132 |
+
"model.layers.16.block_sparse_moe.gate": {
|
| 133 |
+
"bits": 16,
|
| 134 |
+
"data_type": "float"
|
| 135 |
+
},
|
| 136 |
+
"model.layers.17.block_sparse_moe.gate": {
|
| 137 |
+
"bits": 16,
|
| 138 |
+
"data_type": "float"
|
| 139 |
+
},
|
| 140 |
+
"model.layers.18.block_sparse_moe.gate": {
|
| 141 |
+
"bits": 16,
|
| 142 |
+
"data_type": "float"
|
| 143 |
+
},
|
| 144 |
+
"model.layers.19.block_sparse_moe.gate": {
|
| 145 |
+
"bits": 16,
|
| 146 |
+
"data_type": "float"
|
| 147 |
+
},
|
| 148 |
+
"model.layers.2.block_sparse_moe.gate": {
|
| 149 |
+
"bits": 16,
|
| 150 |
+
"data_type": "float"
|
| 151 |
+
},
|
| 152 |
+
"model.layers.20.block_sparse_moe.gate": {
|
| 153 |
+
"bits": 16,
|
| 154 |
+
"data_type": "float"
|
| 155 |
+
},
|
| 156 |
+
"model.layers.21.block_sparse_moe.gate": {
|
| 157 |
+
"bits": 16,
|
| 158 |
+
"data_type": "float"
|
| 159 |
+
},
|
| 160 |
+
"model.layers.22.block_sparse_moe.gate": {
|
| 161 |
+
"bits": 16,
|
| 162 |
+
"data_type": "float"
|
| 163 |
+
},
|
| 164 |
+
"model.layers.23.block_sparse_moe.gate": {
|
| 165 |
+
"bits": 16,
|
| 166 |
+
"data_type": "float"
|
| 167 |
+
},
|
| 168 |
+
"model.layers.24.block_sparse_moe.gate": {
|
| 169 |
+
"bits": 16,
|
| 170 |
+
"data_type": "float"
|
| 171 |
+
},
|
| 172 |
+
"model.layers.25.block_sparse_moe.gate": {
|
| 173 |
+
"bits": 16,
|
| 174 |
+
"data_type": "float"
|
| 175 |
+
},
|
| 176 |
+
"model.layers.26.block_sparse_moe.gate": {
|
| 177 |
+
"bits": 16,
|
| 178 |
+
"data_type": "float"
|
| 179 |
+
},
|
| 180 |
+
"model.layers.27.block_sparse_moe.gate": {
|
| 181 |
+
"bits": 16,
|
| 182 |
+
"data_type": "float"
|
| 183 |
+
},
|
| 184 |
+
"model.layers.28.block_sparse_moe.gate": {
|
| 185 |
+
"bits": 16,
|
| 186 |
+
"data_type": "float"
|
| 187 |
+
},
|
| 188 |
+
"model.layers.29.block_sparse_moe.gate": {
|
| 189 |
+
"bits": 16,
|
| 190 |
+
"data_type": "float"
|
| 191 |
+
},
|
| 192 |
+
"model.layers.3.block_sparse_moe.gate": {
|
| 193 |
+
"bits": 16,
|
| 194 |
+
"data_type": "float"
|
| 195 |
+
},
|
| 196 |
+
"model.layers.30.block_sparse_moe.gate": {
|
| 197 |
+
"bits": 16,
|
| 198 |
+
"data_type": "float"
|
| 199 |
+
},
|
| 200 |
+
"model.layers.31.block_sparse_moe.gate": {
|
| 201 |
+
"bits": 16,
|
| 202 |
+
"data_type": "float"
|
| 203 |
+
},
|
| 204 |
+
"model.layers.32.block_sparse_moe.gate": {
|
| 205 |
+
"bits": 16,
|
| 206 |
+
"data_type": "float"
|
| 207 |
+
},
|
| 208 |
+
"model.layers.33.block_sparse_moe.gate": {
|
| 209 |
+
"bits": 16,
|
| 210 |
+
"data_type": "float"
|
| 211 |
+
},
|
| 212 |
+
"model.layers.34.block_sparse_moe.gate": {
|
| 213 |
+
"bits": 16,
|
| 214 |
+
"data_type": "float"
|
| 215 |
+
},
|
| 216 |
+
"model.layers.35.block_sparse_moe.gate": {
|
| 217 |
+
"bits": 16,
|
| 218 |
+
"data_type": "float"
|
| 219 |
+
},
|
| 220 |
+
"model.layers.36.block_sparse_moe.gate": {
|
| 221 |
+
"bits": 16,
|
| 222 |
+
"data_type": "float"
|
| 223 |
+
},
|
| 224 |
+
"model.layers.37.block_sparse_moe.gate": {
|
| 225 |
+
"bits": 16,
|
| 226 |
+
"data_type": "float"
|
| 227 |
+
},
|
| 228 |
+
"model.layers.38.block_sparse_moe.gate": {
|
| 229 |
+
"bits": 16,
|
| 230 |
+
"data_type": "float"
|
| 231 |
+
},
|
| 232 |
+
"model.layers.39.block_sparse_moe.gate": {
|
| 233 |
+
"bits": 16,
|
| 234 |
+
"data_type": "float"
|
| 235 |
+
},
|
| 236 |
+
"model.layers.4.block_sparse_moe.gate": {
|
| 237 |
+
"bits": 16,
|
| 238 |
+
"data_type": "float"
|
| 239 |
+
},
|
| 240 |
+
"model.layers.40.block_sparse_moe.gate": {
|
| 241 |
+
"bits": 16,
|
| 242 |
+
"data_type": "float"
|
| 243 |
+
},
|
| 244 |
+
"model.layers.41.block_sparse_moe.gate": {
|
| 245 |
+
"bits": 16,
|
| 246 |
+
"data_type": "float"
|
| 247 |
+
},
|
| 248 |
+
"model.layers.42.block_sparse_moe.gate": {
|
| 249 |
+
"bits": 16,
|
| 250 |
+
"data_type": "float"
|
| 251 |
+
},
|
| 252 |
+
"model.layers.43.block_sparse_moe.gate": {
|
| 253 |
+
"bits": 16,
|
| 254 |
+
"data_type": "float"
|
| 255 |
+
},
|
| 256 |
+
"model.layers.44.block_sparse_moe.gate": {
|
| 257 |
+
"bits": 16,
|
| 258 |
+
"data_type": "float"
|
| 259 |
+
},
|
| 260 |
+
"model.layers.45.block_sparse_moe.gate": {
|
| 261 |
+
"bits": 16,
|
| 262 |
+
"data_type": "float"
|
| 263 |
+
},
|
| 264 |
+
"model.layers.46.block_sparse_moe.gate": {
|
| 265 |
+
"bits": 16,
|
| 266 |
+
"data_type": "float"
|
| 267 |
+
},
|
| 268 |
+
"model.layers.47.block_sparse_moe.gate": {
|
| 269 |
+
"bits": 16,
|
| 270 |
+
"data_type": "float"
|
| 271 |
+
},
|
| 272 |
+
"model.layers.48.block_sparse_moe.gate": {
|
| 273 |
+
"bits": 16,
|
| 274 |
+
"data_type": "float"
|
| 275 |
+
},
|
| 276 |
+
"model.layers.49.block_sparse_moe.gate": {
|
| 277 |
+
"bits": 16,
|
| 278 |
+
"data_type": "float"
|
| 279 |
+
},
|
| 280 |
+
"model.layers.5.block_sparse_moe.gate": {
|
| 281 |
+
"bits": 16,
|
| 282 |
+
"data_type": "float"
|
| 283 |
+
},
|
| 284 |
+
"model.layers.50.block_sparse_moe.gate": {
|
| 285 |
+
"bits": 16,
|
| 286 |
+
"data_type": "float"
|
| 287 |
+
},
|
| 288 |
+
"model.layers.51.block_sparse_moe.gate": {
|
| 289 |
+
"bits": 16,
|
| 290 |
+
"data_type": "float"
|
| 291 |
+
},
|
| 292 |
+
"model.layers.52.block_sparse_moe.gate": {
|
| 293 |
+
"bits": 16,
|
| 294 |
+
"data_type": "float"
|
| 295 |
+
},
|
| 296 |
+
"model.layers.53.block_sparse_moe.gate": {
|
| 297 |
+
"bits": 16,
|
| 298 |
+
"data_type": "float"
|
| 299 |
+
},
|
| 300 |
+
"model.layers.54.block_sparse_moe.gate": {
|
| 301 |
+
"bits": 16,
|
| 302 |
+
"data_type": "float"
|
| 303 |
+
},
|
| 304 |
+
"model.layers.55.block_sparse_moe.gate": {
|
| 305 |
+
"bits": 16,
|
| 306 |
+
"data_type": "float"
|
| 307 |
+
},
|
| 308 |
+
"model.layers.56.block_sparse_moe.gate": {
|
| 309 |
+
"bits": 16,
|
| 310 |
+
"data_type": "float"
|
| 311 |
+
},
|
| 312 |
+
"model.layers.57.block_sparse_moe.gate": {
|
| 313 |
+
"bits": 16,
|
| 314 |
+
"data_type": "float"
|
| 315 |
+
},
|
| 316 |
+
"model.layers.58.block_sparse_moe.gate": {
|
| 317 |
+
"bits": 16,
|
| 318 |
+
"data_type": "float"
|
| 319 |
+
},
|
| 320 |
+
"model.layers.59.block_sparse_moe.gate": {
|
| 321 |
+
"bits": 16,
|
| 322 |
+
"data_type": "float"
|
| 323 |
+
},
|
| 324 |
+
"model.layers.6.block_sparse_moe.gate": {
|
| 325 |
+
"bits": 16,
|
| 326 |
+
"data_type": "float"
|
| 327 |
+
},
|
| 328 |
+
"model.layers.60.block_sparse_moe.gate": {
|
| 329 |
+
"bits": 16,
|
| 330 |
+
"data_type": "float"
|
| 331 |
+
},
|
| 332 |
+
"model.layers.61.block_sparse_moe.gate": {
|
| 333 |
+
"bits": 16,
|
| 334 |
+
"data_type": "float"
|
| 335 |
+
},
|
| 336 |
+
"model.layers.7.block_sparse_moe.gate": {
|
| 337 |
+
"bits": 16,
|
| 338 |
+
"data_type": "float"
|
| 339 |
+
},
|
| 340 |
+
"model.layers.8.block_sparse_moe.gate": {
|
| 341 |
+
"bits": 16,
|
| 342 |
+
"data_type": "float"
|
| 343 |
+
},
|
| 344 |
+
"model.layers.9.block_sparse_moe.gate": {
|
| 345 |
+
"bits": 16,
|
| 346 |
+
"data_type": "float"
|
| 347 |
+
}
|
| 348 |
+
},
|
| 349 |
+
"group_size": 128,
|
| 350 |
+
"iters": 0,
|
| 351 |
+
"low_gpu_mem_usage": true,
|
| 352 |
+
"packing_format": "auto_round:auto_gptq",
|
| 353 |
+
"quant_method": "auto-round",
|
| 354 |
+
"seqlen": 512,
|
| 355 |
+
"sym": true,
|
| 356 |
+
"backend": "auto"
|
| 357 |
+
},
|
| 358 |
+
"rms_norm_eps": 1e-06,
|
| 359 |
+
"rope_theta": 5000000,
|
| 360 |
+
"rotary_dim": 64,
|
| 361 |
+
"router_aux_loss_coef": 0.001,
|
| 362 |
+
"router_jitter_noise": 0.0,
|
| 363 |
+
"scoring_func": "sigmoid",
|
| 364 |
+
"shared_intermediate_size": 0,
|
| 365 |
+
"sliding_window": null,
|
| 366 |
+
"tie_word_embeddings": false,
|
| 367 |
+
"torch_dtype": "float32",
|
| 368 |
+
"transformers_version": "4.55.0",
|
| 369 |
+
"use_cache": false,
|
| 370 |
+
"use_mtp": true,
|
| 371 |
+
"use_qk_norm": true,
|
| 372 |
+
"use_routing_bias": true,
|
| 373 |
+
"vocab_size": 200064
|
| 374 |
+
}
|
configuration_minimax_m2.py
ADDED
|
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
|
| 2 |
+
# This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
|
| 3 |
+
# Do NOT edit this file manually as any edits will be overwritten by the generation of
|
| 4 |
+
# the file from the modular. If any change should be done, please apply the change to the
|
| 5 |
+
# modular_minimax_m2.py file directly. One of our CI enforces this.
|
| 6 |
+
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
|
| 7 |
+
# coding=utf-8
|
| 8 |
+
# Copyright 2025 the HuggingFace Team. All rights reserved.
|
| 9 |
+
#
|
| 10 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 11 |
+
# you may not use this file except in compliance with the License.
|
| 12 |
+
# You may obtain a copy of the License at
|
| 13 |
+
#
|
| 14 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 15 |
+
#
|
| 16 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 17 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 18 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 19 |
+
# See the License for the specific language governing permissions and
|
| 20 |
+
# limitations under the License.
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
from transformers.configuration_utils import PretrainedConfig
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class MiniMaxM2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MiniMaxM2Model`]. It is used to instantiate an
    MiniMaxM2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the MiniMaxM2-7B-v0.1 or MiniMaxM2-7B-Instruct-v0.1.

    [minimax_m2ai/MiniMaxM2-8x7B](https://huggingface.co/minimax_m2ai/MiniMaxM2-8x7B)
    [minimax_m2ai/MiniMaxM2-7B-Instruct-v0.1](https://huggingface.co/minimax_m2ai/MiniMaxM2-7B-Instruct-v0.1)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the MiniMaxM2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MiniMaxM2Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 14336):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
        head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
            The attention head dimension.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
            The maximum sequence length that this model might ever be used with. MiniMaxM2's sliding window attention
            allows sequence of up to 4096*32 tokens.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            The id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the "end-of-sequence" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_theta (`float`, *optional*, defaults to 1000000.0):
            The base period of the RoPE embeddings.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If not specified, will default to `4096`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        num_experts_per_tok (`int`, *optional*, defaults to 2):
            The number of experts to route per-token, can be also interpreted as the `top-k` routing
            parameter
        num_local_experts (`int`, *optional*, defaults to 8):
            Number of experts per Sparse MLP layer.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also
            allow the model to output the auxiliary loss.
        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
            The aux loss factor for the total loss.
        router_jitter_noise (`float`, *optional*, defaults to 0.0):
            Amount of noise to add to the router.

    NOTE(review): this file is auto-generated from modular_minimax_m2.py; the docstring
    text and hub links above appear to be inherited from the Mixtral template and may
    not describe actual MiniMax-M2 checkpoints — confirm against the modular source.

    ```python
    >>> from transformers import MiniMaxM2Model, MiniMaxM2Config

    >>> # Initializing a MiniMaxM2 7B style configuration
    >>> configuration = MiniMaxM2Config()

    >>> # Initializing a model from the MiniMaxM2 7B style configuration
    >>> model = MiniMaxM2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "minimax_m2"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Tensor-parallel sharding plan consumed by transformers' TP machinery.
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.block_sparse_moe.gate": "colwise_rep",  # we need to replicate here to correctly route experts
        "layers.*.block_sparse_moe.experts.*.w1": "colwise",
        "layers.*.block_sparse_moe.experts.*.w2": "rowwise",
        "layers.*.block_sparse_moe.experts.*.w3": "colwise",
    }
    # Pipeline-parallel plan: stage name -> (input tensor names, output tensor names).
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=14336,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=8,
        head_dim=None,
        hidden_act="silu",
        max_position_embeddings=4096 * 32,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        rope_theta=1e6,
        sliding_window=None,
        attention_dropout=0.0,
        num_experts_per_tok=2,
        num_local_experts=8,
        output_router_logits=False,
        router_aux_loss_coef=0.001,
        router_jitter_noise=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        self.head_dim = head_dim

        self.num_experts_per_tok = num_experts_per_tok
        self.num_local_experts = num_local_experts
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef
        self.router_jitter_noise = router_jitter_noise

        # MiniMax-M2 extensions are popped from kwargs here so they are not
        # forwarded again to PretrainedConfig.__init__ below.
        self.use_qk_norm = kwargs.pop("use_qk_norm", False)
        # rotary_dim defaults to the full head dimension; may be None when
        # head_dim is unset (then head_dim is derived elsewhere).
        self.rotary_dim = kwargs.pop("rotary_dim", self.head_dim)
        self.partial_rotary_factor = kwargs.pop("partial_rotary_factor", 1)
        # When head_dim is known, partial_rotary_factor is always recomputed
        # from rotary_dim/head_dim — note this overrides any value passed
        # explicitly via kwargs.
        if self.head_dim is not None:
            self.partial_rotary_factor = self.rotary_dim / self.head_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


__all__ = ["MiniMaxM2Config"]
|
generation_config.json
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bos_token_id": 200019,
|
| 3 |
+
"do_sample": true,
|
| 4 |
+
"eos_token_id": 200020,
|
| 5 |
+
"top_k": 40,
|
| 6 |
+
"top_p": 0.95,
|
| 7 |
+
"transformers_version": "4.55.0"
|
| 8 |
+
}
|
merges.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
model-00001-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ae71fc8b2a295c6fbeffc1392102d7535545861aeaba35f33b8a7e800cefd5e9
|
| 3 |
+
size 4025916304
|
model-00002-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cff95bbaaf738158c49ec2cb1f78891dff3085ebae4a16ae4e97b74a1b7b8bc0
|
| 3 |
+
size 4101923464
|
model-00003-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1917d5b06a4d56aa898d71c920210a3e01476b52964d77d111cbe3bf6de53b71
|
| 3 |
+
size 4001397992
|
model-00004-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:75ad493476dc54d8419bbf44326f1211a3255d7b4c039e848761b49a021239cf
|
| 3 |
+
size 4082311232
|
model-00005-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f5f19e158e39b2e06c69c4d5dfc2c4526e4bfb4094a658a19ff23dc08deb5d37
|
| 3 |
+
size 3979336488
|
model-00006-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c252e29c4cea0d75be995641e909e3ceb3a6d70a5c9b6203808bdd1c12a4a4e0
|
| 3 |
+
size 4081498824
|
model-00007-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:980f530b268308dfbab9f5d31d1dc4f51969809491543a2641714c9e61fa7a2c
|
| 3 |
+
size 4008758248
|
model-00008-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cbd35327f4d1439854f9f2112cce4142beda27aab55ab4277fc6a85428626dff
|
| 3 |
+
size 4087217352
|
model-00009-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f6c74141c15fcef45e6494f07cd7a8da9c1e0af3557213878e0cdfb22068ff4e
|
| 3 |
+
size 4033277008
|
model-00010-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4a068087befe8de7752b2dd1ccabf0a420126081682f58ada992017dbd8db3ef
|
| 3 |
+
size 4018565992
|
model-00011-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:671e8d2773bde8d789bc5148c81062aca8d1fc632950a16143cddf854cb0e5f5
|
| 3 |
+
size 4049624984
|
model-00012-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:db25db97b8500ebf01414615df9e576e097c3c7bd280ab6d9902243b78384c25
|
| 3 |
+
size 4001402712
|
model-00013-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5a0d63088ff703e057e9ddd76f51e14599bfb8ce27d37733930ac7b86b8fda62
|
| 3 |
+
size 4089669200
|
model-00014-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bde944af17a17b518b6e36ad1265b8dfc8e760c42c836735450a02d6b0a0d988
|
| 3 |
+
size 4082313768
|
model-00015-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:08781903d895e9d7fb60645a09881e643f9109a1d07227ae4b82e8d8faa8f7c5
|
| 3 |
+
size 4001403080
|
model-00016-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7d997546378ed7368096ad37efc60eab2fcfdbe65cef39fd7c83177133f75d35
|
| 3 |
+
size 4045536272
|
model-00017-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7970b3812ceb21a03c5aefead78562ebdb3f26890615106cb3ad0f2fd363d7d4
|
| 3 |
+
size 4030010048
|
model-00018-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ac1e70e3b07cbf9dd0b70d2599b5f05195f58a7d1979bc4de14735586b5be256
|
| 3 |
+
size 4089669200
|
model-00019-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4e38c40bc31b09a92d5ebad7257510f8ce869912cc2457c6fee97fe9d49b9a85
|
| 3 |
+
size 3979336128
|
model-00020-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b1a901493f818a53f5d9a4c38b6eeddf6f742ed4192ee99423f2b7baf6667c67
|
| 3 |
+
size 4104380528
|
model-00021-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b84e04c5b8228c70719558e53f32c3ff3bb7778f4b27da68db292fae2413f1d0
|
| 3 |
+
size 4030825208
|
model-00022-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:70fd65c55585dd9575db271954742aaecfec8240d066628d35964cc6d83aa677
|
| 3 |
+
size 4022654680
|
model-00023-of-00023.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9bde0602ad6822ed612a6cba8af4a6086242823bab55a66997ad330ba64281ea
|
| 3 |
+
size 2565146520
|
model.safetensors.index.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bcb0594c6171cd3f865e272400290c16d3d9bed9a0728c931a48a6bfaf7a8f8d
|
| 3 |
+
size 10551137
|
modeling_minimax_m2.py
ADDED
|
@@ -0,0 +1,733 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
|
| 2 |
+
# This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
|
| 3 |
+
# Do NOT edit this file manually as any edits will be overwritten by the generation of
|
| 4 |
+
# the file from the modular. If any change should be done, please apply the change to the
|
| 5 |
+
# modular_minimax_m2.py file directly. One of our CI enforces this.
|
| 6 |
+
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
|
| 7 |
+
# coding=utf-8
|
| 8 |
+
# Copyright 2025 the HuggingFace Team. All rights reserved.
|
| 9 |
+
#
|
| 10 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 11 |
+
# you may not use this file except in compliance with the License.
|
| 12 |
+
# You may obtain a copy of the License at
|
| 13 |
+
#
|
| 14 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 15 |
+
#
|
| 16 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 17 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 18 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 19 |
+
# See the License for the specific language governing permissions and
|
| 20 |
+
# limitations under the License.
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
from collections.abc import Callable
|
| 24 |
+
from typing import Optional, Union, Unpack
|
| 25 |
+
|
| 26 |
+
import torch
|
| 27 |
+
from torch import nn
|
| 28 |
+
|
| 29 |
+
from transformers.activations import ACT2FN
|
| 30 |
+
from transformers.cache_utils import Cache, DynamicCache
|
| 31 |
+
from transformers.generation import GenerationMixin
|
| 32 |
+
from transformers.integrations import use_kernel_forward_from_hub
|
| 33 |
+
from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
|
| 34 |
+
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
|
| 35 |
+
from transformers.modeling_layers import (
|
| 36 |
+
GenericForQuestionAnswering,
|
| 37 |
+
GenericForSequenceClassification,
|
| 38 |
+
GenericForTokenClassification,
|
| 39 |
+
GradientCheckpointingLayer,
|
| 40 |
+
)
|
| 41 |
+
from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
|
| 42 |
+
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
|
| 43 |
+
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
|
| 44 |
+
from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple
|
| 45 |
+
from transformers.utils.deprecation import deprecate_kwarg
|
| 46 |
+
from transformers.utils.generic import OutputRecorder, check_model_inputs
|
| 47 |
+
from .configuration_minimax_m2 import MiniMaxM2Config
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# -----------------------------------------------------------------------
|
| 51 |
+
# transformers >= 5.0 removed the "default" key from ROPE_INIT_FUNCTIONS
|
| 52 |
+
# (the standard LLaMA-style base-theta inverse-frequency RoPE) and moved
|
| 53 |
+
# that path inline. This checkpoint's config has no ``rope_scaling`` block,
|
| 54 |
+
# so the rope_type resolves to "default" below and the dict lookup crashes
|
| 55 |
+
# with KeyError on transformers 5.x. Re-register a plain default here so
|
| 56 |
+
# this modeling file stays portable across transformers 4.x and 5.x. The
|
| 57 |
+
# injection is a no-op when "default" is already present (older tf).
|
| 58 |
+
# -----------------------------------------------------------------------
|
| 59 |
+
# -----------------------------------------------------------------------
# transformers >= 5.0 dropped the "default" entry (plain LLaMA-style
# base-theta inverse-frequency RoPE) from ROPE_INIT_FUNCTIONS and inlined
# that path instead. This checkpoint's config carries no ``rope_scaling``
# block, so the rope_type below resolves to "default" and the dict lookup
# raises KeyError on transformers 5.x. Re-registering a plain default here
# keeps this modeling file portable across transformers 4.x and 5.x; when
# "default" already exists (older transformers) this is a no-op.
# -----------------------------------------------------------------------
if "default" not in ROPE_INIT_FUNCTIONS:

    def _minimax_m2_default_rope_init(config, device=None, seq_len=None, layer_type=None):
        """Standard base-theta inverse-frequency RoPE initializer.

        Mirrors the signature expected by ``ROPE_INIT_FUNCTIONS`` entries and
        returns ``(inv_freq, attention_scaling)``; scaling is always 1.0 for
        the unscaled default path. ``seq_len``/``layer_type`` are accepted for
        interface compatibility but unused.
        """
        theta = config.rope_theta
        rotary_fraction = getattr(config, "partial_rotary_factor", 1.0)
        per_head_dim = getattr(config, "head_dim", None) or (
            config.hidden_size // config.num_attention_heads
        )
        rotary_dim = int(per_head_dim * rotary_fraction)
        # Even indices 0, 2, ..., rotary_dim-2; built as int64 then cast so the
        # division below happens in float32 on the target device.
        even_indices = torch.arange(0, rotary_dim, 2, dtype=torch.int64).to(
            device=device, dtype=torch.float
        )
        inv_freq = 1.0 / (theta ** (even_indices / rotary_dim))
        return inv_freq, 1.0

    ROPE_INIT_FUNCTIONS["default"] = _minimax_m2_default_rope_init
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class MiniMaxM2MLP(nn.Module):
    """SwiGLU-style feed-forward block (used as a single MoE expert).

    ``w1`` produces the gate, ``w3`` the value, and ``w2`` projects the gated
    product back to the hidden size. All projections are bias-free.
    """

    def __init__(self, config: MiniMaxM2Config):
        super().__init__()
        self.ffn_dim = config.intermediate_size
        self.hidden_dim = config.hidden_size

        self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
        self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
        self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)

        # Activation applied only to the gate branch (w1).
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        gated = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
        return self.w2(gated)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class MiniMaxM2Experts(nn.ModuleList):
    """ModuleList holding one ``MiniMaxM2MLP`` per local expert."""

    def __init__(self, config: MiniMaxM2Config):
        super().__init__()
        self.top_k = config.num_experts_per_tok
        self.num_experts = config.num_local_experts
        for _ in range(self.num_experts):
            self.append(MiniMaxM2MLP(config))

    def forward(
        self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor
    ) -> torch.Tensor:
        """Dispatch each token to its selected experts and sum weighted outputs.

        Args:
            hidden_states: (batch_size * sequence_length, hidden_dim)
            top_k_index: (batch_size * sequence_length, top_k) expert indices
            top_k_weights: (batch_size * sequence_length, top_k) routing weights
        Returns:
            (batch_size * sequence_length, hidden_dim)
        """
        combined = torch.zeros_like(hidden_states)
        # one_hot -> (num_experts, top_k, num_tokens): rows tell which tokens
        # each expert serves and via which top-k slot.
        expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts).permute(2, 1, 0)

        # Visit only experts that actually received at least one token.
        active_experts = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
        for expert_idx in active_experts:
            slot_idx, token_idx = torch.where(expert_mask[expert_idx].squeeze(0))
            selected = hidden_states[None, token_idx].reshape(-1, hidden_states.shape[-1])
            weighted = self[expert_idx](selected) * top_k_weights[token_idx, slot_idx, None]
            combined.index_add_(0, token_idx, weighted.to(hidden_states.dtype))
        return combined
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class MiniMaxM2SparseMoeBlock(nn.Module):
    """Sparse MoE block: sigmoid router with score-correction bias + experts."""

    def __init__(self, config):
        super().__init__()
        self.top_k = config.num_experts_per_tok
        self.jitter_noise = config.router_jitter_noise
        self.gate = nn.Linear(config.hidden_size, config.num_local_experts, bias=False)
        self.experts = MiniMaxM2Experts(config)
        # Bias added to routing scores for expert *selection* only; the actual
        # mixing weights come from the uncorrected sigmoid scores.
        self.register_buffer("e_score_correction_bias", torch.zeros(config.num_local_experts))

    def route_tokens_to_experts(self, router_logits):
        # Sigmoid scores in float32 for numerically stable selection.
        scores = torch.nn.functional.sigmoid(router_logits.float())
        biased_scores = scores + self.e_score_correction_bias
        _, top_k_index = torch.topk(biased_scores, self.top_k, dim=-1, sorted=False)
        top_k_weights = scores.gather(1, top_k_index)
        # Renormalize the selected weights so they sum to 1 per token.
        top_k_weights = top_k_weights / top_k_weights.sum(dim=-1, keepdim=True)
        return top_k_index, top_k_weights.to(router_logits.dtype)

    def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        if self.training and self.jitter_noise > 0:
            # In-place multiplicative jitter, matching upstream behavior.
            hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
        hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
        router_logits = self.gate(hidden_states)
        top_k_index, top_k_weights = self.route_tokens_to_experts(router_logits)
        hidden_states = self.experts(hidden_states, top_k_index, top_k_weights.to(hidden_states.dtype))
        hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        return hidden_states, router_logits
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
@use_kernel_forward_from_hub("RMSNorm")
class MiniMaxM2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        MiniMaxM2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Normalize in float32 for stability, then cast back to input dtype.
        original_dtype = hidden_states.dtype
        states_fp32 = hidden_states.to(torch.float32)
        mean_square = states_fp32.pow(2).mean(-1, keepdim=True)
        normalized = states_fp32 * torch.rsqrt(mean_square + self.variance_epsilon)
        return self.weight * normalized.to(original_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_kv_heads, seq_len, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    # Insert a repeat axis, broadcast it, then fold it into the head axis.
    expanded = hidden_states[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, seq_len, head_dim)
    return expanded.reshape(batch, num_kv_heads * n_rep, seq_len, head_dim)
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    """Reference (non-fused) scaled-dot-product attention with GQA support.

    Expands K/V to the full head count, applies the additive mask, softmaxes
    in float32, and returns ``(attn_output, attn_weights)`` with the output
    transposed to (batch, seq, heads, head_dim).
    """
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    scores = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # Slice the mask down to the current key length before adding it.
        scores = scores + attention_mask[:, :, :, : key_states.shape[-2]]

    # Softmax in float32 for stability, then cast back to the query dtype.
    probs = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(query.dtype)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)
    attn_output = torch.matmul(probs, value_states).transpose(1, 2).contiguous()

    return attn_output, probs
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Supports partial-rotary layouts: only the leading ``cos.shape[-1]`` dims of
    each head are rotated; the remainder is passed through unchanged and
    concatenated back.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which cos/sin are unsqueezed so they broadcast
            against q and k. Use 1 for [batch, heads, seq, head_dim] layouts
            and 2 for [batch, seq, heads, head_dim] layouts.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated
        using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    # Split off the rotary portion; the tail (if any) is left untouched.
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    # Standard RoPE rotation on the rotary slice only.
    q_rotated = (q_rot * cos) + (rotate_half(q_rot) * sin)
    k_rotated = (k_rot * cos) + (rotate_half(k_rot) * sin)

    # Reattach the pass-through tail to restore the full head dimension.
    q_embed = torch.cat([q_rotated, q_pass], dim=-1)
    k_embed = torch.cat([k_rotated, k_pass], dim=-1)
    return q_embed, k_embed
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
class MiniMaxM2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: MiniMaxM2Config, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        # Grouped-query attention: how many query heads share each KV head.
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)

        # Optional RMSNorm over the *flattened* Q/K projections (all heads at
        # once), applied before the per-head reshape — main diff from Llama.
        self.use_qk_norm = config.use_qk_norm
        if self.use_qk_norm:
            self.q_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_attention_heads, eps=config.rms_norm_eps)
            self.k_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_key_value_heads, eps=config.rms_norm_eps)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        leading_shape = hidden_states.shape[:-1]
        per_head_shape = (*leading_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        if self.use_qk_norm:  # main diff from Llama
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)

        # Reshape to per-head layout, then move heads before the sequence dim.
        key_states = key_states.view(per_head_shape).transpose(1, 2)
        query_states = query_states.view(per_head_shape).transpose(1, 2)
        value_states = value_states.view(per_head_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; position_ids needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Select the configured attention backend; eager is the fallback.
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*leading_shape, -1).contiguous()
        return self.o_proj(attn_output), attn_weights
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class MiniMaxM2DecoderLayer(GradientCheckpointingLayer):
    """One transformer layer: pre-norm self-attention + pre-norm sparse MoE."""

    def __init__(self, config: MiniMaxM2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = MiniMaxM2Attention(config, layer_idx)

        self.block_sparse_moe = MiniMaxM2SparseMoeBlock(config)
        self.input_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.FloatTensor:
        # --- self-attention sub-block (pre-norm + residual) ---
        attn_residual = hidden_states
        normed = self.input_layernorm(hidden_states)
        attn_out, _ = self.self_attn(
            hidden_states=normed,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = attn_residual + attn_out

        # --- sparse MoE sub-block (pre-norm + residual) ---
        moe_residual = hidden_states
        moe_out, _ = self.block_sparse_moe(self.post_attention_layernorm(hidden_states))
        hidden_states = moe_residual + moe_out

        return hidden_states
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
class MiniMaxM2RotaryEmbedding(nn.Module):
    """Precomputes RoPE cos/sin tables for the decoder layers."""

    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config: MiniMaxM2Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        # Kept so dynamic RoPE variants can restore the unscaled frequencies.
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        freq_cols = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        pos_rows = position_ids[:, None, :].float()

        # Compute angles in float32 with autocast disabled; mps lacks the
        # needed autocast support, hence the "cpu" device_type fallback.
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (freq_cols.float() @ pos_rows.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
@auto_docstring
class MiniMaxM2PreTrainedModel(PreTrainedModel):
    # Base class wiring: config type, weight-loading prefix, and the feature
    # flags the transformers runtime inspects for this architecture.
    config: MiniMaxM2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MiniMaxM2DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = False  # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
    _supports_attention_backend = True
    # Maps output names to the submodules whose forward results are recorded
    # when the caller requests them (router logits come from index 1 of the
    # sparse MoE block's return tuple).
    _can_record_outputs = {
        "router_logits": OutputRecorder(MiniMaxM2SparseMoeBlock, index=1),
        "hidden_states": MiniMaxM2DecoderLayer,
        "attentions": MiniMaxM2Attention,
    }
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
@auto_docstring
class MiniMaxM2Model(MiniMaxM2PreTrainedModel):
    """Bare MiniMax-M2 decoder stack (embeddings + layers + final norm)."""

    def __init__(self, config: MiniMaxM2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [MiniMaxM2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = MiniMaxM2RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        # Exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            # Positions continue from whatever is already in the KV cache.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # Full causal mask, or sliding-window variant when configured.
        mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
        causal_mask = mask_function(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)

        return MoeModelOutputWithPast(  # only diff with Mistral is the output type, we need MoE
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
|
| 525 |
+
|
| 526 |
+
|
| 527 |
+
def load_balancing_loss_func(
|
| 528 |
+
gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
|
| 529 |
+
num_experts: Optional[int] = None,
|
| 530 |
+
top_k=2,
|
| 531 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 532 |
+
) -> Union[torch.Tensor, int]:
|
| 533 |
+
r"""
|
| 534 |
+
Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
|
| 535 |
+
|
| 536 |
+
See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
|
| 537 |
+
function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
|
| 538 |
+
experts is too unbalanced.
|
| 539 |
+
|
| 540 |
+
Args:
|
| 541 |
+
gate_logits:
|
| 542 |
+
Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
|
| 543 |
+
shape [batch_size X sequence_length, num_experts].
|
| 544 |
+
num_experts:
|
| 545 |
+
Number of experts
|
| 546 |
+
top_k:
|
| 547 |
+
The number of experts to route per-token, can be also interpreted as the `top-k` routing
|
| 548 |
+
parameter.
|
| 549 |
+
attention_mask (`torch.Tensor`, *optional*):
|
| 550 |
+
The attention_mask used in forward function
|
| 551 |
+
shape [batch_size X sequence_length] if not None.
|
| 552 |
+
|
| 553 |
+
Returns:
|
| 554 |
+
The auxiliary loss.
|
| 555 |
+
"""
|
| 556 |
+
if gate_logits is None or not isinstance(gate_logits, tuple):
|
| 557 |
+
return 0
|
| 558 |
+
|
| 559 |
+
if isinstance(gate_logits, tuple):
|
| 560 |
+
compute_device = gate_logits[0].device
|
| 561 |
+
concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
|
| 562 |
+
|
| 563 |
+
routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
|
| 564 |
+
|
| 565 |
+
_, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
|
| 566 |
+
|
| 567 |
+
expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
|
| 568 |
+
|
| 569 |
+
if attention_mask is None:
|
| 570 |
+
# Compute the percentage of tokens routed to each experts
|
| 571 |
+
tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
|
| 572 |
+
|
| 573 |
+
# Compute the average probability of routing to these experts
|
| 574 |
+
router_prob_per_expert = torch.mean(routing_weights, dim=0)
|
| 575 |
+
else:
|
| 576 |
+
batch_size, sequence_length = attention_mask.shape
|
| 577 |
+
num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
|
| 578 |
+
|
| 579 |
+
# Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
|
| 580 |
+
expert_attention_mask = (
|
| 581 |
+
attention_mask[None, :, :, None, None]
|
| 582 |
+
.expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
|
| 583 |
+
.reshape(-1, top_k, num_experts)
|
| 584 |
+
.to(compute_device)
|
| 585 |
+
)
|
| 586 |
+
|
| 587 |
+
# Compute the percentage of tokens routed to each experts
|
| 588 |
+
tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
|
| 589 |
+
expert_attention_mask, dim=0
|
| 590 |
+
)
|
| 591 |
+
|
| 592 |
+
# Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
|
| 593 |
+
router_per_expert_attention_mask = (
|
| 594 |
+
attention_mask[None, :, :, None]
|
| 595 |
+
.expand((num_hidden_layers, batch_size, sequence_length, num_experts))
|
| 596 |
+
.reshape(-1, num_experts)
|
| 597 |
+
.to(compute_device)
|
| 598 |
+
)
|
| 599 |
+
|
| 600 |
+
# Compute the average probability of routing to these experts
|
| 601 |
+
router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
|
| 602 |
+
router_per_expert_attention_mask, dim=0
|
| 603 |
+
)
|
| 604 |
+
|
| 605 |
+
overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
|
| 606 |
+
return overall_loss * num_experts
|
| 607 |
+
|
| 608 |
+
|
| 609 |
+
@auto_docstring
|
| 610 |
+
class MiniMaxM2ForCausalLM(MiniMaxM2PreTrainedModel, GenerationMixin):
|
| 611 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 612 |
+
_tp_plan = {"lm_head": "colwise_rep"}
|
| 613 |
+
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
|
| 614 |
+
|
| 615 |
+
def __init__(self, config):
|
| 616 |
+
super().__init__(config)
|
| 617 |
+
self.model = MiniMaxM2Model(config)
|
| 618 |
+
self.vocab_size = config.vocab_size
|
| 619 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 620 |
+
self.router_aux_loss_coef = config.router_aux_loss_coef
|
| 621 |
+
self.num_experts = config.num_local_experts
|
| 622 |
+
self.num_experts_per_tok = config.num_experts_per_tok
|
| 623 |
+
|
| 624 |
+
# Initialize weights and apply final processing
|
| 625 |
+
self.post_init()
|
| 626 |
+
|
| 627 |
+
@can_return_tuple
|
| 628 |
+
@auto_docstring
|
| 629 |
+
def forward(
|
| 630 |
+
self,
|
| 631 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 632 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 633 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 634 |
+
past_key_values: Optional[Cache] = None,
|
| 635 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 636 |
+
labels: Optional[torch.LongTensor] = None,
|
| 637 |
+
use_cache: Optional[bool] = None,
|
| 638 |
+
output_router_logits: Optional[bool] = None,
|
| 639 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 640 |
+
logits_to_keep: Union[int, torch.Tensor] = 0,
|
| 641 |
+
**kwargs: Unpack[TransformersKwargs],
|
| 642 |
+
) -> MoeCausalLMOutputWithPast:
|
| 643 |
+
r"""
|
| 644 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 645 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| 646 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| 647 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
| 648 |
+
|
| 649 |
+
Example:
|
| 650 |
+
|
| 651 |
+
```python
|
| 652 |
+
>>> from transformers import AutoTokenizer, MiniMaxM2ForCausalLM
|
| 653 |
+
|
| 654 |
+
>>> model = MiniMaxM2ForCausalLM.from_pretrained("mistralai/MiniMaxM2-8x7B-v0.1")
|
| 655 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/MiniMaxM2-8x7B-v0.1")
|
| 656 |
+
|
| 657 |
+
>>> prompt = "Hey, are you conscious? Can you talk to me?"
|
| 658 |
+
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
| 659 |
+
|
| 660 |
+
>>> # Generate
|
| 661 |
+
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| 662 |
+
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| 663 |
+
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
| 664 |
+
```"""
|
| 665 |
+
|
| 666 |
+
output_router_logits = (
|
| 667 |
+
output_router_logits if output_router_logits is not None else self.config.output_router_logits
|
| 668 |
+
)
|
| 669 |
+
|
| 670 |
+
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
| 671 |
+
outputs: MoeModelOutputWithPast = self.model(
|
| 672 |
+
input_ids=input_ids,
|
| 673 |
+
attention_mask=attention_mask,
|
| 674 |
+
position_ids=position_ids,
|
| 675 |
+
past_key_values=past_key_values,
|
| 676 |
+
inputs_embeds=inputs_embeds,
|
| 677 |
+
use_cache=use_cache,
|
| 678 |
+
output_router_logits=output_router_logits,
|
| 679 |
+
cache_position=cache_position,
|
| 680 |
+
**kwargs,
|
| 681 |
+
)
|
| 682 |
+
|
| 683 |
+
hidden_states = outputs.last_hidden_state
|
| 684 |
+
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
|
| 685 |
+
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
|
| 686 |
+
logits = self.lm_head(hidden_states[:, slice_indices, :])
|
| 687 |
+
|
| 688 |
+
loss = None
|
| 689 |
+
if labels is not None:
|
| 690 |
+
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
|
| 691 |
+
|
| 692 |
+
aux_loss = None
|
| 693 |
+
if output_router_logits:
|
| 694 |
+
aux_loss = load_balancing_loss_func(
|
| 695 |
+
outputs.router_logits,
|
| 696 |
+
self.num_experts,
|
| 697 |
+
self.num_experts_per_tok,
|
| 698 |
+
attention_mask,
|
| 699 |
+
)
|
| 700 |
+
if labels is not None:
|
| 701 |
+
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
|
| 702 |
+
|
| 703 |
+
return MoeCausalLMOutputWithPast(
|
| 704 |
+
loss=loss,
|
| 705 |
+
aux_loss=aux_loss,
|
| 706 |
+
logits=logits,
|
| 707 |
+
past_key_values=outputs.past_key_values,
|
| 708 |
+
hidden_states=outputs.hidden_states,
|
| 709 |
+
attentions=outputs.attentions,
|
| 710 |
+
router_logits=outputs.router_logits,
|
| 711 |
+
)
|
| 712 |
+
|
| 713 |
+
|
| 714 |
+
class MiniMaxM2ForSequenceClassification(GenericForSequenceClassification, MiniMaxM2PreTrainedModel):
|
| 715 |
+
pass
|
| 716 |
+
|
| 717 |
+
|
| 718 |
+
class MiniMaxM2ForTokenClassification(GenericForTokenClassification, MiniMaxM2PreTrainedModel):
|
| 719 |
+
pass
|
| 720 |
+
|
| 721 |
+
|
| 722 |
+
class MiniMaxM2ForQuestionAnswering(GenericForQuestionAnswering, MiniMaxM2PreTrainedModel):
|
| 723 |
+
pass
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
__all__ = [
|
| 727 |
+
"MiniMaxM2ForCausalLM",
|
| 728 |
+
"MiniMaxM2ForQuestionAnswering",
|
| 729 |
+
"MiniMaxM2Model",
|
| 730 |
+
"MiniMaxM2PreTrainedModel",
|
| 731 |
+
"MiniMaxM2ForSequenceClassification",
|
| 732 |
+
"MiniMaxM2ForTokenClassification",
|
| 733 |
+
]
|
quantization_config.json
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bits": 4,
|
| 3 |
+
"data_type": "int",
|
| 4 |
+
"group_size": 128,
|
| 5 |
+
"sym": true,
|
| 6 |
+
"batch_size": 1,
|
| 7 |
+
"iters": 0,
|
| 8 |
+
"low_gpu_mem_usage": true,
|
| 9 |
+
"seqlen": 512,
|
| 10 |
+
"autoround_version": "0.12.2",
|
| 11 |
+
"quant_method": "auto-round",
|
| 12 |
+
"packing_format": "auto_round:auto_gptq",
|
| 13 |
+
"extra_config": {
|
| 14 |
+
"model.layers.0.block_sparse_moe.gate": {
|
| 15 |
+
"bits": 16,
|
| 16 |
+
"data_type": "float"
|
| 17 |
+
},
|
| 18 |
+
"model.layers.1.block_sparse_moe.gate": {
|
| 19 |
+
"bits": 16,
|
| 20 |
+
"data_type": "float"
|
| 21 |
+
},
|
| 22 |
+
"model.layers.2.block_sparse_moe.gate": {
|
| 23 |
+
"bits": 16,
|
| 24 |
+
"data_type": "float"
|
| 25 |
+
},
|
| 26 |
+
"model.layers.3.block_sparse_moe.gate": {
|
| 27 |
+
"bits": 16,
|
| 28 |
+
"data_type": "float"
|
| 29 |
+
},
|
| 30 |
+
"model.layers.4.block_sparse_moe.gate": {
|
| 31 |
+
"bits": 16,
|
| 32 |
+
"data_type": "float"
|
| 33 |
+
},
|
| 34 |
+
"model.layers.5.block_sparse_moe.gate": {
|
| 35 |
+
"bits": 16,
|
| 36 |
+
"data_type": "float"
|
| 37 |
+
},
|
| 38 |
+
"model.layers.6.block_sparse_moe.gate": {
|
| 39 |
+
"bits": 16,
|
| 40 |
+
"data_type": "float"
|
| 41 |
+
},
|
| 42 |
+
"model.layers.7.block_sparse_moe.gate": {
|
| 43 |
+
"bits": 16,
|
| 44 |
+
"data_type": "float"
|
| 45 |
+
},
|
| 46 |
+
"model.layers.8.block_sparse_moe.gate": {
|
| 47 |
+
"bits": 16,
|
| 48 |
+
"data_type": "float"
|
| 49 |
+
},
|
| 50 |
+
"model.layers.9.block_sparse_moe.gate": {
|
| 51 |
+
"bits": 16,
|
| 52 |
+
"data_type": "float"
|
| 53 |
+
},
|
| 54 |
+
"model.layers.10.block_sparse_moe.gate": {
|
| 55 |
+
"bits": 16,
|
| 56 |
+
"data_type": "float"
|
| 57 |
+
},
|
| 58 |
+
"model.layers.11.block_sparse_moe.gate": {
|
| 59 |
+
"bits": 16,
|
| 60 |
+
"data_type": "float"
|
| 61 |
+
},
|
| 62 |
+
"model.layers.12.block_sparse_moe.gate": {
|
| 63 |
+
"bits": 16,
|
| 64 |
+
"data_type": "float"
|
| 65 |
+
},
|
| 66 |
+
"model.layers.13.block_sparse_moe.gate": {
|
| 67 |
+
"bits": 16,
|
| 68 |
+
"data_type": "float"
|
| 69 |
+
},
|
| 70 |
+
"model.layers.14.block_sparse_moe.gate": {
|
| 71 |
+
"bits": 16,
|
| 72 |
+
"data_type": "float"
|
| 73 |
+
},
|
| 74 |
+
"model.layers.15.block_sparse_moe.gate": {
|
| 75 |
+
"bits": 16,
|
| 76 |
+
"data_type": "float"
|
| 77 |
+
},
|
| 78 |
+
"model.layers.16.block_sparse_moe.gate": {
|
| 79 |
+
"bits": 16,
|
| 80 |
+
"data_type": "float"
|
| 81 |
+
},
|
| 82 |
+
"model.layers.17.block_sparse_moe.gate": {
|
| 83 |
+
"bits": 16,
|
| 84 |
+
"data_type": "float"
|
| 85 |
+
},
|
| 86 |
+
"model.layers.18.block_sparse_moe.gate": {
|
| 87 |
+
"bits": 16,
|
| 88 |
+
"data_type": "float"
|
| 89 |
+
},
|
| 90 |
+
"model.layers.19.block_sparse_moe.gate": {
|
| 91 |
+
"bits": 16,
|
| 92 |
+
"data_type": "float"
|
| 93 |
+
},
|
| 94 |
+
"model.layers.20.block_sparse_moe.gate": {
|
| 95 |
+
"bits": 16,
|
| 96 |
+
"data_type": "float"
|
| 97 |
+
},
|
| 98 |
+
"model.layers.21.block_sparse_moe.gate": {
|
| 99 |
+
"bits": 16,
|
| 100 |
+
"data_type": "float"
|
| 101 |
+
},
|
| 102 |
+
"model.layers.22.block_sparse_moe.gate": {
|
| 103 |
+
"bits": 16,
|
| 104 |
+
"data_type": "float"
|
| 105 |
+
},
|
| 106 |
+
"model.layers.23.block_sparse_moe.gate": {
|
| 107 |
+
"bits": 16,
|
| 108 |
+
"data_type": "float"
|
| 109 |
+
},
|
| 110 |
+
"model.layers.24.block_sparse_moe.gate": {
|
| 111 |
+
"bits": 16,
|
| 112 |
+
"data_type": "float"
|
| 113 |
+
},
|
| 114 |
+
"model.layers.25.block_sparse_moe.gate": {
|
| 115 |
+
"bits": 16,
|
| 116 |
+
"data_type": "float"
|
| 117 |
+
},
|
| 118 |
+
"model.layers.26.block_sparse_moe.gate": {
|
| 119 |
+
"bits": 16,
|
| 120 |
+
"data_type": "float"
|
| 121 |
+
},
|
| 122 |
+
"model.layers.27.block_sparse_moe.gate": {
|
| 123 |
+
"bits": 16,
|
| 124 |
+
"data_type": "float"
|
| 125 |
+
},
|
| 126 |
+
"model.layers.28.block_sparse_moe.gate": {
|
| 127 |
+
"bits": 16,
|
| 128 |
+
"data_type": "float"
|
| 129 |
+
},
|
| 130 |
+
"model.layers.29.block_sparse_moe.gate": {
|
| 131 |
+
"bits": 16,
|
| 132 |
+
"data_type": "float"
|
| 133 |
+
},
|
| 134 |
+
"model.layers.30.block_sparse_moe.gate": {
|
| 135 |
+
"bits": 16,
|
| 136 |
+
"data_type": "float"
|
| 137 |
+
},
|
| 138 |
+
"model.layers.31.block_sparse_moe.gate": {
|
| 139 |
+
"bits": 16,
|
| 140 |
+
"data_type": "float"
|
| 141 |
+
},
|
| 142 |
+
"model.layers.32.block_sparse_moe.gate": {
|
| 143 |
+
"bits": 16,
|
| 144 |
+
"data_type": "float"
|
| 145 |
+
},
|
| 146 |
+
"model.layers.33.block_sparse_moe.gate": {
|
| 147 |
+
"bits": 16,
|
| 148 |
+
"data_type": "float"
|
| 149 |
+
},
|
| 150 |
+
"model.layers.34.block_sparse_moe.gate": {
|
| 151 |
+
"bits": 16,
|
| 152 |
+
"data_type": "float"
|
| 153 |
+
},
|
| 154 |
+
"model.layers.35.block_sparse_moe.gate": {
|
| 155 |
+
"bits": 16,
|
| 156 |
+
"data_type": "float"
|
| 157 |
+
},
|
| 158 |
+
"model.layers.36.block_sparse_moe.gate": {
|
| 159 |
+
"bits": 16,
|
| 160 |
+
"data_type": "float"
|
| 161 |
+
},
|
| 162 |
+
"model.layers.37.block_sparse_moe.gate": {
|
| 163 |
+
"bits": 16,
|
| 164 |
+
"data_type": "float"
|
| 165 |
+
},
|
| 166 |
+
"model.layers.38.block_sparse_moe.gate": {
|
| 167 |
+
"bits": 16,
|
| 168 |
+
"data_type": "float"
|
| 169 |
+
},
|
| 170 |
+
"model.layers.39.block_sparse_moe.gate": {
|
| 171 |
+
"bits": 16,
|
| 172 |
+
"data_type": "float"
|
| 173 |
+
},
|
| 174 |
+
"model.layers.40.block_sparse_moe.gate": {
|
| 175 |
+
"bits": 16,
|
| 176 |
+
"data_type": "float"
|
| 177 |
+
},
|
| 178 |
+
"model.layers.41.block_sparse_moe.gate": {
|
| 179 |
+
"bits": 16,
|
| 180 |
+
"data_type": "float"
|
| 181 |
+
},
|
| 182 |
+
"model.layers.42.block_sparse_moe.gate": {
|
| 183 |
+
"bits": 16,
|
| 184 |
+
"data_type": "float"
|
| 185 |
+
},
|
| 186 |
+
"model.layers.43.block_sparse_moe.gate": {
|
| 187 |
+
"bits": 16,
|
| 188 |
+
"data_type": "float"
|
| 189 |
+
},
|
| 190 |
+
"model.layers.44.block_sparse_moe.gate": {
|
| 191 |
+
"bits": 16,
|
| 192 |
+
"data_type": "float"
|
| 193 |
+
},
|
| 194 |
+
"model.layers.45.block_sparse_moe.gate": {
|
| 195 |
+
"bits": 16,
|
| 196 |
+
"data_type": "float"
|
| 197 |
+
},
|
| 198 |
+
"model.layers.46.block_sparse_moe.gate": {
|
| 199 |
+
"bits": 16,
|
| 200 |
+
"data_type": "float"
|
| 201 |
+
},
|
| 202 |
+
"model.layers.47.block_sparse_moe.gate": {
|
| 203 |
+
"bits": 16,
|
| 204 |
+
"data_type": "float"
|
| 205 |
+
},
|
| 206 |
+
"model.layers.48.block_sparse_moe.gate": {
|
| 207 |
+
"bits": 16,
|
| 208 |
+
"data_type": "float"
|
| 209 |
+
},
|
| 210 |
+
"model.layers.49.block_sparse_moe.gate": {
|
| 211 |
+
"bits": 16,
|
| 212 |
+
"data_type": "float"
|
| 213 |
+
},
|
| 214 |
+
"model.layers.50.block_sparse_moe.gate": {
|
| 215 |
+
"bits": 16,
|
| 216 |
+
"data_type": "float"
|
| 217 |
+
},
|
| 218 |
+
"model.layers.51.block_sparse_moe.gate": {
|
| 219 |
+
"bits": 16,
|
| 220 |
+
"data_type": "float"
|
| 221 |
+
},
|
| 222 |
+
"model.layers.52.block_sparse_moe.gate": {
|
| 223 |
+
"bits": 16,
|
| 224 |
+
"data_type": "float"
|
| 225 |
+
},
|
| 226 |
+
"model.layers.53.block_sparse_moe.gate": {
|
| 227 |
+
"bits": 16,
|
| 228 |
+
"data_type": "float"
|
| 229 |
+
},
|
| 230 |
+
"model.layers.54.block_sparse_moe.gate": {
|
| 231 |
+
"bits": 16,
|
| 232 |
+
"data_type": "float"
|
| 233 |
+
},
|
| 234 |
+
"model.layers.55.block_sparse_moe.gate": {
|
| 235 |
+
"bits": 16,
|
| 236 |
+
"data_type": "float"
|
| 237 |
+
},
|
| 238 |
+
"model.layers.56.block_sparse_moe.gate": {
|
| 239 |
+
"bits": 16,
|
| 240 |
+
"data_type": "float"
|
| 241 |
+
},
|
| 242 |
+
"model.layers.57.block_sparse_moe.gate": {
|
| 243 |
+
"bits": 16,
|
| 244 |
+
"data_type": "float"
|
| 245 |
+
},
|
| 246 |
+
"model.layers.58.block_sparse_moe.gate": {
|
| 247 |
+
"bits": 16,
|
| 248 |
+
"data_type": "float"
|
| 249 |
+
},
|
| 250 |
+
"model.layers.59.block_sparse_moe.gate": {
|
| 251 |
+
"bits": 16,
|
| 252 |
+
"data_type": "float"
|
| 253 |
+
},
|
| 254 |
+
"model.layers.60.block_sparse_moe.gate": {
|
| 255 |
+
"bits": 16,
|
| 256 |
+
"data_type": "float"
|
| 257 |
+
},
|
| 258 |
+
"model.layers.61.block_sparse_moe.gate": {
|
| 259 |
+
"bits": 16,
|
| 260 |
+
"data_type": "float"
|
| 261 |
+
}
|
| 262 |
+
}
|
| 263 |
+
}
|
special_tokens_map.json
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"additional_special_tokens": [
|
| 3 |
+
"<code_interpreter>",
|
| 4 |
+
"<commit_after>",
|
| 5 |
+
"<commit_before>",
|
| 6 |
+
"<commit_msg>",
|
| 7 |
+
"<empty_output>",
|
| 8 |
+
"<filename>",
|
| 9 |
+
"<fim_middle>",
|
| 10 |
+
"<fim_pad>",
|
| 11 |
+
"<fim_prefix>",
|
| 12 |
+
"<fim_suffix>",
|
| 13 |
+
"<function_call>",
|
| 14 |
+
"<gh_stars>",
|
| 15 |
+
"]<]speech[>[",
|
| 16 |
+
"]<]image[>[",
|
| 17 |
+
"]<]video[>[",
|
| 18 |
+
"]<]start of speech[>[",
|
| 19 |
+
"]<]end of speech[>[",
|
| 20 |
+
"]<]start of image[>[",
|
| 21 |
+
"]<]end of image[>[",
|
| 22 |
+
"]<]start of video[>[",
|
| 23 |
+
"]<]end of video[>[",
|
| 24 |
+
"]<]vision pad[>[",
|
| 25 |
+
"]~!b[",
|
| 26 |
+
"<issue_closed>",
|
| 27 |
+
"<issue_comment>",
|
| 28 |
+
"<issue_start>",
|
| 29 |
+
"<jupyter_code>",
|
| 30 |
+
"<jupyter_output>",
|
| 31 |
+
"<jupyter_start>",
|
| 32 |
+
"<jupyter_text>",
|
| 33 |
+
"<reponame>",
|
| 34 |
+
"[e~[",
|
| 35 |
+
"]!d~[",
|
| 36 |
+
"]!p~[",
|
| 37 |
+
"]~b]",
|
| 38 |
+
"<jupyter_error>",
|
| 39 |
+
"<add_file>",
|
| 40 |
+
"<delete_file>",
|
| 41 |
+
"<rename_file>",
|
| 42 |
+
"<edit_file>",
|
| 43 |
+
"<commit_message>",
|
| 44 |
+
"<empty_source_file>",
|
| 45 |
+
"<repo_struct>",
|
| 46 |
+
"<code_context>",
|
| 47 |
+
"<file_content>",
|
| 48 |
+
"<source_files>",
|
| 49 |
+
"<pr_start>",
|
| 50 |
+
"<review_comment>",
|
| 51 |
+
"<filepath>",
|
| 52 |
+
"<file_sep>"
|
| 53 |
+
],
|
| 54 |
+
"bos_token": {
|
| 55 |
+
"content": "]~!b[",
|
| 56 |
+
"lstrip": false,
|
| 57 |
+
"normalized": false,
|
| 58 |
+
"rstrip": false,
|
| 59 |
+
"single_word": false
|
| 60 |
+
},
|
| 61 |
+
"eos_token": {
|
| 62 |
+
"content": "[e~[",
|
| 63 |
+
"lstrip": false,
|
| 64 |
+
"normalized": false,
|
| 65 |
+
"rstrip": false,
|
| 66 |
+
"single_word": false
|
| 67 |
+
},
|
| 68 |
+
"unk_token": {
|
| 69 |
+
"content": "]!d~[",
|
| 70 |
+
"lstrip": false,
|
| 71 |
+
"normalized": false,
|
| 72 |
+
"rstrip": false,
|
| 73 |
+
"single_word": false
|
| 74 |
+
}
|
| 75 |
+
}
|
tokenizer.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e67402211c3ec8525e3fcdd775c62791e60cb291c6b94dfecff990cfb58e0bd2
|
| 3 |
+
size 15522861
|
tokenizer_config.json
ADDED
|
@@ -0,0 +1,496 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"add_prefix_space": false,
|
| 3 |
+
"added_tokens_decoder": {
|
| 4 |
+
"200000": {
|
| 5 |
+
"content": "]!p~[",
|
| 6 |
+
"lstrip": false,
|
| 7 |
+
"normalized": false,
|
| 8 |
+
"rstrip": false,
|
| 9 |
+
"single_word": false,
|
| 10 |
+
"special": true
|
| 11 |
+
},
|
| 12 |
+
"200001": {
|
| 13 |
+
"content": "<fim_prefix>",
|
| 14 |
+
"lstrip": false,
|
| 15 |
+
"normalized": false,
|
| 16 |
+
"rstrip": false,
|
| 17 |
+
"single_word": false,
|
| 18 |
+
"special": true
|
| 19 |
+
},
|
| 20 |
+
"200002": {
|
| 21 |
+
"content": "<fim_middle>",
|
| 22 |
+
"lstrip": false,
|
| 23 |
+
"normalized": false,
|
| 24 |
+
"rstrip": false,
|
| 25 |
+
"single_word": false,
|
| 26 |
+
"special": true
|
| 27 |
+
},
|
| 28 |
+
"200003": {
|
| 29 |
+
"content": "<fim_suffix>",
|
| 30 |
+
"lstrip": false,
|
| 31 |
+
"normalized": false,
|
| 32 |
+
"rstrip": false,
|
| 33 |
+
"single_word": false,
|
| 34 |
+
"special": true
|
| 35 |
+
},
|
| 36 |
+
"200004": {
|
| 37 |
+
"content": "<fim_pad>",
|
| 38 |
+
"lstrip": false,
|
| 39 |
+
"normalized": false,
|
| 40 |
+
"rstrip": false,
|
| 41 |
+
"single_word": false,
|
| 42 |
+
"special": true
|
| 43 |
+
},
|
| 44 |
+
"200005": {
|
| 45 |
+
"content": "<reponame>",
|
| 46 |
+
"lstrip": false,
|
| 47 |
+
"normalized": false,
|
| 48 |
+
"rstrip": false,
|
| 49 |
+
"single_word": false,
|
| 50 |
+
"special": true
|
| 51 |
+
},
|
| 52 |
+
"200006": {
|
| 53 |
+
"content": "<filename>",
|
| 54 |
+
"lstrip": false,
|
| 55 |
+
"normalized": false,
|
| 56 |
+
"rstrip": false,
|
| 57 |
+
"single_word": false,
|
| 58 |
+
"special": true
|
| 59 |
+
},
|
| 60 |
+
"200007": {
|
| 61 |
+
"content": "<gh_stars>",
|
| 62 |
+
"lstrip": false,
|
| 63 |
+
"normalized": false,
|
| 64 |
+
"rstrip": false,
|
| 65 |
+
"single_word": false,
|
| 66 |
+
"special": true
|
| 67 |
+
},
|
| 68 |
+
"200008": {
|
| 69 |
+
"content": "<issue_start>",
|
| 70 |
+
"lstrip": false,
|
| 71 |
+
"normalized": false,
|
| 72 |
+
"rstrip": false,
|
| 73 |
+
"single_word": false,
|
| 74 |
+
"special": true
|
| 75 |
+
},
|
| 76 |
+
"200009": {
|
| 77 |
+
"content": "<issue_comment>",
|
| 78 |
+
"lstrip": false,
|
| 79 |
+
"normalized": false,
|
| 80 |
+
"rstrip": false,
|
| 81 |
+
"single_word": false,
|
| 82 |
+
"special": true
|
| 83 |
+
},
|
| 84 |
+
"200010": {
|
| 85 |
+
"content": "<issue_closed>",
|
| 86 |
+
"lstrip": false,
|
| 87 |
+
"normalized": false,
|
| 88 |
+
"rstrip": false,
|
| 89 |
+
"single_word": false,
|
| 90 |
+
"special": true
|
| 91 |
+
},
|
| 92 |
+
"200011": {
|
| 93 |
+
"content": "<jupyter_start>",
|
| 94 |
+
"lstrip": false,
|
| 95 |
+
"normalized": false,
|
| 96 |
+
"rstrip": false,
|
| 97 |
+
"single_word": false,
|
| 98 |
+
"special": true
|
| 99 |
+
},
|
| 100 |
+
"200012": {
|
| 101 |
+
"content": "<jupyter_text>",
|
| 102 |
+
"lstrip": false,
|
| 103 |
+
"normalized": false,
|
| 104 |
+
"rstrip": false,
|
| 105 |
+
"single_word": false,
|
| 106 |
+
"special": true
|
| 107 |
+
},
|
| 108 |
+
"200013": {
|
| 109 |
+
"content": "<jupyter_code>",
|
| 110 |
+
"lstrip": false,
|
| 111 |
+
"normalized": false,
|
| 112 |
+
"rstrip": false,
|
| 113 |
+
"single_word": false,
|
| 114 |
+
"special": true
|
| 115 |
+
},
|
| 116 |
+
"200014": {
|
| 117 |
+
"content": "<jupyter_output>",
|
| 118 |
+
"lstrip": false,
|
| 119 |
+
"normalized": false,
|
| 120 |
+
"rstrip": false,
|
| 121 |
+
"single_word": false,
|
| 122 |
+
"special": true
|
| 123 |
+
},
|
| 124 |
+
"200015": {
|
| 125 |
+
"content": "<empty_output>",
|
| 126 |
+
"lstrip": false,
|
| 127 |
+
"normalized": false,
|
| 128 |
+
"rstrip": false,
|
| 129 |
+
"single_word": false,
|
| 130 |
+
"special": true
|
| 131 |
+
},
|
| 132 |
+
"200016": {
|
| 133 |
+
"content": "<commit_before>",
|
| 134 |
+
"lstrip": false,
|
| 135 |
+
"normalized": false,
|
| 136 |
+
"rstrip": false,
|
| 137 |
+
"single_word": false,
|
| 138 |
+
"special": true
|
| 139 |
+
},
|
| 140 |
+
"200017": {
|
| 141 |
+
"content": "<commit_msg>",
|
| 142 |
+
"lstrip": false,
|
| 143 |
+
"normalized": false,
|
| 144 |
+
"rstrip": false,
|
| 145 |
+
"single_word": false,
|
| 146 |
+
"special": true
|
| 147 |
+
},
|
| 148 |
+
"200018": {
|
| 149 |
+
"content": "<commit_after>",
|
| 150 |
+
"lstrip": false,
|
| 151 |
+
"normalized": false,
|
| 152 |
+
"rstrip": false,
|
| 153 |
+
"single_word": false,
|
| 154 |
+
"special": true
|
| 155 |
+
},
|
| 156 |
+
"200019": {
|
| 157 |
+
"content": "]~b]",
|
| 158 |
+
"lstrip": false,
|
| 159 |
+
"normalized": false,
|
| 160 |
+
"rstrip": false,
|
| 161 |
+
"single_word": false,
|
| 162 |
+
"special": true
|
| 163 |
+
},
|
| 164 |
+
"200020": {
|
| 165 |
+
"content": "[e~[",
|
| 166 |
+
"lstrip": false,
|
| 167 |
+
"normalized": false,
|
| 168 |
+
"rstrip": false,
|
| 169 |
+
"single_word": false,
|
| 170 |
+
"special": true
|
| 171 |
+
},
|
| 172 |
+
"200021": {
|
| 173 |
+
"content": "]!d~[",
|
| 174 |
+
"lstrip": false,
|
| 175 |
+
"normalized": false,
|
| 176 |
+
"rstrip": false,
|
| 177 |
+
"single_word": false,
|
| 178 |
+
"special": true
|
| 179 |
+
},
|
| 180 |
+
"200022": {
|
| 181 |
+
"content": "<function_call>",
|
| 182 |
+
"lstrip": false,
|
| 183 |
+
"normalized": false,
|
| 184 |
+
"rstrip": false,
|
| 185 |
+
"single_word": false,
|
| 186 |
+
"special": true
|
| 187 |
+
},
|
| 188 |
+
"200023": {
|
| 189 |
+
"content": "<code_interpreter>",
|
| 190 |
+
"lstrip": false,
|
| 191 |
+
"normalized": false,
|
| 192 |
+
"rstrip": false,
|
| 193 |
+
"single_word": false,
|
| 194 |
+
"special": true
|
| 195 |
+
},
|
| 196 |
+
"200024": {
|
| 197 |
+
"content": "]<]speech[>[",
|
| 198 |
+
"lstrip": false,
|
| 199 |
+
"normalized": false,
|
| 200 |
+
"rstrip": false,
|
| 201 |
+
"single_word": false,
|
| 202 |
+
"special": true
|
| 203 |
+
},
|
| 204 |
+
"200025": {
|
| 205 |
+
"content": "]<]image[>[",
|
| 206 |
+
"lstrip": false,
|
| 207 |
+
"normalized": false,
|
| 208 |
+
"rstrip": false,
|
| 209 |
+
"single_word": false,
|
| 210 |
+
"special": true
|
| 211 |
+
},
|
| 212 |
+
"200026": {
|
| 213 |
+
"content": "]<]video[>[",
|
| 214 |
+
"lstrip": false,
|
| 215 |
+
"normalized": false,
|
| 216 |
+
"rstrip": false,
|
| 217 |
+
"single_word": false,
|
| 218 |
+
"special": true
|
| 219 |
+
},
|
| 220 |
+
"200027": {
|
| 221 |
+
"content": "]<]start of speech[>[",
|
| 222 |
+
"lstrip": false,
|
| 223 |
+
"normalized": false,
|
| 224 |
+
"rstrip": false,
|
| 225 |
+
"single_word": false,
|
| 226 |
+
"special": true
|
| 227 |
+
},
|
| 228 |
+
"200028": {
|
| 229 |
+
"content": "]<]end of speech[>[",
|
| 230 |
+
"lstrip": false,
|
| 231 |
+
"normalized": false,
|
| 232 |
+
"rstrip": false,
|
| 233 |
+
"single_word": false,
|
| 234 |
+
"special": true
|
| 235 |
+
},
|
| 236 |
+
"200029": {
|
| 237 |
+
"content": "]<]start of image[>[",
|
| 238 |
+
"lstrip": false,
|
| 239 |
+
"normalized": false,
|
| 240 |
+
"rstrip": false,
|
| 241 |
+
"single_word": false,
|
| 242 |
+
"special": true
|
| 243 |
+
},
|
| 244 |
+
"200030": {
|
| 245 |
+
"content": "]<]end of image[>[",
|
| 246 |
+
"lstrip": false,
|
| 247 |
+
"normalized": false,
|
| 248 |
+
"rstrip": false,
|
| 249 |
+
"single_word": false,
|
| 250 |
+
"special": true
|
| 251 |
+
},
|
| 252 |
+
"200031": {
|
| 253 |
+
"content": "]<]start of video[>[",
|
| 254 |
+
"lstrip": false,
|
| 255 |
+
"normalized": false,
|
| 256 |
+
"rstrip": false,
|
| 257 |
+
"single_word": false,
|
| 258 |
+
"special": true
|
| 259 |
+
},
|
| 260 |
+
"200032": {
|
| 261 |
+
"content": "]<]end of video[>[",
|
| 262 |
+
"lstrip": false,
|
| 263 |
+
"normalized": false,
|
| 264 |
+
"rstrip": false,
|
| 265 |
+
"single_word": false,
|
| 266 |
+
"special": true
|
| 267 |
+
},
|
| 268 |
+
"200033": {
|
| 269 |
+
"content": "]<]vision pad[>[",
|
| 270 |
+
"lstrip": false,
|
| 271 |
+
"normalized": false,
|
| 272 |
+
"rstrip": false,
|
| 273 |
+
"single_word": false,
|
| 274 |
+
"special": true
|
| 275 |
+
},
|
| 276 |
+
"200034": {
|
| 277 |
+
"content": "]~!b[",
|
| 278 |
+
"lstrip": false,
|
| 279 |
+
"normalized": false,
|
| 280 |
+
"rstrip": false,
|
| 281 |
+
"single_word": false,
|
| 282 |
+
"special": true
|
| 283 |
+
},
|
| 284 |
+
"200035": {
|
| 285 |
+
"content": "<jupyter_error>",
|
| 286 |
+
"lstrip": false,
|
| 287 |
+
"normalized": false,
|
| 288 |
+
"rstrip": false,
|
| 289 |
+
"single_word": false,
|
| 290 |
+
"special": true
|
| 291 |
+
},
|
| 292 |
+
"200036": {
|
| 293 |
+
"content": "<add_file>",
|
| 294 |
+
"lstrip": false,
|
| 295 |
+
"normalized": false,
|
| 296 |
+
"rstrip": false,
|
| 297 |
+
"single_word": false,
|
| 298 |
+
"special": true
|
| 299 |
+
},
|
| 300 |
+
"200037": {
|
| 301 |
+
"content": "<delete_file>",
|
| 302 |
+
"lstrip": false,
|
| 303 |
+
"normalized": false,
|
| 304 |
+
"rstrip": false,
|
| 305 |
+
"single_word": false,
|
| 306 |
+
"special": true
|
| 307 |
+
},
|
| 308 |
+
"200038": {
|
| 309 |
+
"content": "<rename_file>",
|
| 310 |
+
"lstrip": false,
|
| 311 |
+
"normalized": false,
|
| 312 |
+
"rstrip": false,
|
| 313 |
+
"single_word": false,
|
| 314 |
+
"special": true
|
| 315 |
+
},
|
| 316 |
+
"200039": {
|
| 317 |
+
"content": "<edit_file>",
|
| 318 |
+
"lstrip": false,
|
| 319 |
+
"normalized": false,
|
| 320 |
+
"rstrip": false,
|
| 321 |
+
"single_word": false,
|
| 322 |
+
"special": true
|
| 323 |
+
},
|
| 324 |
+
"200040": {
|
| 325 |
+
"content": "<commit_message>",
|
| 326 |
+
"lstrip": false,
|
| 327 |
+
"normalized": false,
|
| 328 |
+
"rstrip": false,
|
| 329 |
+
"single_word": false,
|
| 330 |
+
"special": true
|
| 331 |
+
},
|
| 332 |
+
"200041": {
|
| 333 |
+
"content": "<empty_source_file>",
|
| 334 |
+
"lstrip": false,
|
| 335 |
+
"normalized": false,
|
| 336 |
+
"rstrip": false,
|
| 337 |
+
"single_word": false,
|
| 338 |
+
"special": true
|
| 339 |
+
},
|
| 340 |
+
"200042": {
|
| 341 |
+
"content": "<repo_struct>",
|
| 342 |
+
"lstrip": false,
|
| 343 |
+
"normalized": false,
|
| 344 |
+
"rstrip": false,
|
| 345 |
+
"single_word": false,
|
| 346 |
+
"special": true
|
| 347 |
+
},
|
| 348 |
+
"200043": {
|
| 349 |
+
"content": "<code_context>",
|
| 350 |
+
"lstrip": false,
|
| 351 |
+
"normalized": false,
|
| 352 |
+
"rstrip": false,
|
| 353 |
+
"single_word": false,
|
| 354 |
+
"special": true
|
| 355 |
+
},
|
| 356 |
+
"200044": {
|
| 357 |
+
"content": "<file_content>",
|
| 358 |
+
"lstrip": false,
|
| 359 |
+
"normalized": false,
|
| 360 |
+
"rstrip": false,
|
| 361 |
+
"single_word": false,
|
| 362 |
+
"special": true
|
| 363 |
+
},
|
| 364 |
+
"200045": {
|
| 365 |
+
"content": "<source_files>",
|
| 366 |
+
"lstrip": false,
|
| 367 |
+
"normalized": false,
|
| 368 |
+
"rstrip": false,
|
| 369 |
+
"single_word": false,
|
| 370 |
+
"special": true
|
| 371 |
+
},
|
| 372 |
+
"200046": {
|
| 373 |
+
"content": "<pr_start>",
|
| 374 |
+
"lstrip": false,
|
| 375 |
+
"normalized": false,
|
| 376 |
+
"rstrip": false,
|
| 377 |
+
"single_word": false,
|
| 378 |
+
"special": true
|
| 379 |
+
},
|
| 380 |
+
"200047": {
|
| 381 |
+
"content": "<review_comment>",
|
| 382 |
+
"lstrip": false,
|
| 383 |
+
"normalized": false,
|
| 384 |
+
"rstrip": false,
|
| 385 |
+
"single_word": false,
|
| 386 |
+
"special": true
|
| 387 |
+
},
|
| 388 |
+
"200048": {
|
| 389 |
+
"content": "<filepath>",
|
| 390 |
+
"lstrip": false,
|
| 391 |
+
"normalized": false,
|
| 392 |
+
"rstrip": false,
|
| 393 |
+
"single_word": false,
|
| 394 |
+
"special": true
|
| 395 |
+
},
|
| 396 |
+
"200049": {
|
| 397 |
+
"content": "<file_sep>",
|
| 398 |
+
"lstrip": false,
|
| 399 |
+
"normalized": false,
|
| 400 |
+
"rstrip": false,
|
| 401 |
+
"single_word": false,
|
| 402 |
+
"special": true
|
| 403 |
+
},
|
| 404 |
+
"200050": {
|
| 405 |
+
"content": "<think>",
|
| 406 |
+
"lstrip": false,
|
| 407 |
+
"normalized": false,
|
| 408 |
+
"rstrip": false,
|
| 409 |
+
"single_word": false,
|
| 410 |
+
"special": false
|
| 411 |
+
},
|
| 412 |
+
"200051": {
|
| 413 |
+
"content": "</think>",
|
| 414 |
+
"lstrip": false,
|
| 415 |
+
"normalized": false,
|
| 416 |
+
"rstrip": false,
|
| 417 |
+
"single_word": false,
|
| 418 |
+
"special": false
|
| 419 |
+
},
|
| 420 |
+
"200052": {
|
| 421 |
+
"content": "<minimax:tool_call>",
|
| 422 |
+
"lstrip": false,
|
| 423 |
+
"normalized": false,
|
| 424 |
+
"rstrip": false,
|
| 425 |
+
"single_word": false,
|
| 426 |
+
"special": false
|
| 427 |
+
},
|
| 428 |
+
"200053": {
|
| 429 |
+
"content": "</minimax:tool_call>",
|
| 430 |
+
"lstrip": false,
|
| 431 |
+
"normalized": false,
|
| 432 |
+
"rstrip": false,
|
| 433 |
+
"single_word": false,
|
| 434 |
+
"special": false
|
| 435 |
+
}
|
| 436 |
+
},
|
| 437 |
+
"additional_special_tokens": [
|
| 438 |
+
"<code_interpreter>",
|
| 439 |
+
"<commit_after>",
|
| 440 |
+
"<commit_before>",
|
| 441 |
+
"<commit_msg>",
|
| 442 |
+
"<empty_output>",
|
| 443 |
+
"<filename>",
|
| 444 |
+
"<fim_middle>",
|
| 445 |
+
"<fim_pad>",
|
| 446 |
+
"<fim_prefix>",
|
| 447 |
+
"<fim_suffix>",
|
| 448 |
+
"<function_call>",
|
| 449 |
+
"<gh_stars>",
|
| 450 |
+
"]<]speech[>[",
|
| 451 |
+
"]<]image[>[",
|
| 452 |
+
"]<]video[>[",
|
| 453 |
+
"]<]start of speech[>[",
|
| 454 |
+
"]<]end of speech[>[",
|
| 455 |
+
"]<]start of image[>[",
|
| 456 |
+
"]<]end of image[>[",
|
| 457 |
+
"]<]start of video[>[",
|
| 458 |
+
"]<]end of video[>[",
|
| 459 |
+
"]<]vision pad[>[",
|
| 460 |
+
"]~!b[",
|
| 461 |
+
"<issue_closed>",
|
| 462 |
+
"<issue_comment>",
|
| 463 |
+
"<issue_start>",
|
| 464 |
+
"<jupyter_code>",
|
| 465 |
+
"<jupyter_output>",
|
| 466 |
+
"<jupyter_start>",
|
| 467 |
+
"<jupyter_text>",
|
| 468 |
+
"<reponame>",
|
| 469 |
+
"[e~[",
|
| 470 |
+
"]!d~[",
|
| 471 |
+
"]!p~[",
|
| 472 |
+
"]~b]",
|
| 473 |
+
"<jupyter_error>",
|
| 474 |
+
"<add_file>",
|
| 475 |
+
"<delete_file>",
|
| 476 |
+
"<rename_file>",
|
| 477 |
+
"<edit_file>",
|
| 478 |
+
"<commit_message>",
|
| 479 |
+
"<empty_source_file>",
|
| 480 |
+
"<repo_struct>",
|
| 481 |
+
"<code_context>",
|
| 482 |
+
"<file_content>",
|
| 483 |
+
"<source_files>",
|
| 484 |
+
"<pr_start>",
|
| 485 |
+
"<review_comment>",
|
| 486 |
+
"<filepath>",
|
| 487 |
+
"<file_sep>"
|
| 488 |
+
],
|
| 489 |
+
"bos_token": "]~!b[",
|
| 490 |
+
"clean_up_tokenization_spaces": false,
|
| 491 |
+
"eos_token": "[e~[",
|
| 492 |
+
"extra_special_tokens": {},
|
| 493 |
+
"model_max_length": 40960000,
|
| 494 |
+
"tokenizer_class": "GPT2Tokenizer",
|
| 495 |
+
"unk_token": "]!d~["
|
| 496 |
+
}
|
vocab.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|