Commit 7f2d55e
Parent(s): fb01bc4

Added 2026_Q1

PyTorchConference2025_GithubRepos.json CHANGED (+362 -181)
@@ -6,10 +6,11 @@
     "github_about_section": "The LLVM Project is a collection of modular and reusable compiler and toolchain technologies.",
     "homepage_link": "http://llvm.org",
     "github_topic_closest_fit": "compiler",
-    "contributors_all":
+    "contributors_all": 7086,
     "contributors_2025": 2378,
     "contributors_2024": 2130,
-    "contributors_2023": 1920
+    "contributors_2023": 1920,
+    "contributors_2026_q1": 1364
   },
   {
     "repo_name": "vllm",
@@ -18,10 +19,11 @@
     "github_about_section": "A high-throughput and memory-efficient inference and serving engine for LLMs",
     "homepage_link": "https://docs.vllm.ai",
     "github_topic_closest_fit": "inference",
-    "contributors_all":
+    "contributors_all": 2351,
     "contributors_2025": 1369,
     "contributors_2024": 579,
-    "contributors_2023": 145
+    "contributors_2023": 145,
+    "contributors_2026_q1": 698
   },
   {
     "repo_name": "pytorch",
@@ -30,10 +32,11 @@
     "github_about_section": "Tensors and Dynamic neural networks in Python with strong GPU acceleration",
     "homepage_link": "https://pytorch.org",
     "github_topic_closest_fit": "machine-learning",
-    "contributors_all":
+    "contributors_all": 5690,
     "contributors_2025": 1187,
     "contributors_2024": 1090,
-    "contributors_2023": 1024
+    "contributors_2023": 1024,
+    "contributors_2026_q1": 560
   },
   {
     "repo_name": "transformers",
@@ -42,10 +45,11 @@
     "github_about_section": "Transformers: the model-definition framework for state-of-the-art machine learning models in text, vision, audio, and multimodal models, for both inference and training.",
     "homepage_link": "https://huggingface.co/transformers",
     "github_topic_closest_fit": "machine-learning",
-    "contributors_all":
+    "contributors_all": 3742,
     "contributors_2025": 860,
     "contributors_2024": 769,
-    "contributors_2023": 758
+    "contributors_2023": 758,
+    "contributors_2026_q1": 222
   },
   {
     "repo_name": "sglang",
@@ -54,10 +58,11 @@
     "github_about_section": "SGLang is a fast serving framework for large language models and vision language models.",
     "homepage_link": "https://docs.sglang.ai",
     "github_topic_closest_fit": "inference",
-    "contributors_all":
+    "contributors_all": 1267,
     "contributors_2025": 796,
     "contributors_2024": 189,
-    "contributors_2023": 1
+    "contributors_2023": 1,
+    "contributors_2026_q1": 504
   },
   {
     "repo_name": "hhvm",
@@ -69,7 +74,8 @@
     "contributors_all": 2773,
     "contributors_2025": 692,
     "contributors_2024": 648,
-    "contributors_2023": 604
+    "contributors_2023": 604,
+    "contributors_2026_q1": 383
   },
   {
     "repo_name": "llama.cpp",
@@ -78,10 +84,11 @@
     "github_about_section": "LLM inference in C/C++",
     "homepage_link": "https://ggml.ai",
     "github_topic_closest_fit": "inference",
-    "contributors_all":
+    "contributors_all": 1573,
     "contributors_2025": 535,
     "contributors_2024": 575,
-    "contributors_2023": 461
+    "contributors_2023": 461,
+    "contributors_2026_q1": 246
   },
   {
     "repo_name": "kubernetes",
@@ -90,10 +97,11 @@
     "github_about_section": "Production-Grade Container Scheduling and Management",
     "homepage_link": "https://kubernetes.io",
     "github_topic_closest_fit": "kubernetes",
-    "contributors_all":
-    "contributors_2025":
+    "contributors_all": 5158,
+    "contributors_2025": 542,
     "contributors_2024": 499,
-    "contributors_2023": 565
+    "contributors_2023": 565,
+    "contributors_2026_q1": 233
   },
   {
     "repo_name": "tensorflow",
@@ -102,10 +110,11 @@
     "github_about_section": "An Open Source Machine Learning Framework for Everyone",
     "homepage_link": "https://tensorflow.org",
     "github_topic_closest_fit": "machine-learning",
-    "contributors_all":
+    "contributors_all": 4679,
     "contributors_2025": 506,
     "contributors_2024": 523,
-    "contributors_2023": 630
+    "contributors_2023": 630,
+    "contributors_2026_q1": 257
   },
   {
     "repo_name": "verl",
@@ -114,10 +123,11 @@
     "github_about_section": "verl: Volcano Engine Reinforcement Learning for LLMs",
     "homepage_link": "https://verl.readthedocs.io",
     "github_topic_closest_fit": "deep-reinforcement-learning",
-    "contributors_all":
+    "contributors_all": 584,
     "contributors_2025": 454,
     "contributors_2024": 10,
-    "contributors_2023": 0
+    "contributors_2023": 0,
+    "contributors_2026_q1": 153
   },
   {
     "repo_name": "rocm-systems",
@@ -126,10 +136,11 @@
     "github_about_section": "super repo for rocm systems projects",
     "homepage_link": "https://amd.com/en/products/software/rocm.html",
     "github_topic_closest_fit": "amd",
-    "contributors_all":
-    "contributors_2025":
+    "contributors_all": 1174,
+    "contributors_2025": 498,
     "contributors_2024": 351,
-    "contributors_2023": 213
+    "contributors_2023": 213,
+    "contributors_2026_q1": 250
   },
   {
     "repo_name": "ray",
@@ -138,10 +149,11 @@
     "github_about_section": "Ray is an AI compute engine. Ray consists of a core distributed runtime and a set of AI Libraries for accelerating ML workloads.",
     "homepage_link": "https://ray.io",
     "github_topic_closest_fit": "machine-learning",
-    "contributors_all":
+    "contributors_all": 1473,
     "contributors_2025": 397,
     "contributors_2024": 223,
-    "contributors_2023": 230
+    "contributors_2023": 230,
+    "contributors_2026_q1": 173
   },
   {
     "repo_name": "spark",
@@ -150,10 +162,11 @@
     "github_about_section": "Apache Spark - A unified analytics engine for large-scale data processing",
     "homepage_link": "https://spark.apache.org",
     "github_topic_closest_fit": "data-processing",
-    "contributors_all":
+    "contributors_all": 3139,
     "contributors_2025": 322,
     "contributors_2024": 300,
-    "contributors_2023": 336
+    "contributors_2023": 336,
+    "contributors_2026_q1": 132
   },
   {
     "repo_name": "goose",
@@ -162,10 +175,11 @@
     "github_about_section": "an open source, extensible AI agent that goes beyond code suggestions - install, execute, edit, and test with any LLM",
     "homepage_link": "https://block.github.io/goose",
     "github_topic_closest_fit": "ai-agents",
-    "contributors_all":
+    "contributors_all": 439,
     "contributors_2025": 319,
     "contributors_2024": 32,
-    "contributors_2023": 0
+    "contributors_2023": 0,
+    "contributors_2026_q1": 126
   },
   {
     "repo_name": "elasticsearch",
@@ -174,10 +188,11 @@
     "github_about_section": "Free and Open Source, Distributed, RESTful Search Engine",
     "homepage_link": "https://elastic.co/products/elasticsearch",
     "github_topic_closest_fit": "search-engine",
-    "contributors_all":
+    "contributors_all": 2344,
     "contributors_2025": 316,
     "contributors_2024": 284,
-    "contributors_2023": 270
+    "contributors_2023": 270,
+    "contributors_2026_q1": 200
   },
   {
     "repo_name": "jax",
@@ -186,10 +201,11 @@
     "github_about_section": "Composable transformations of Python+NumPy programs: differentiate, vectorize, JIT to GPU/TPU, and more",
     "homepage_link": "https://docs.jax.dev",
     "github_topic_closest_fit": "scientific-computing",
-    "contributors_all":
-    "contributors_2025":
+    "contributors_all": 1037,
+    "contributors_2025": 317,
     "contributors_2024": 280,
-    "contributors_2023": 202
+    "contributors_2023": 202,
+    "contributors_2026_q1": 130
   },
   {
     "repo_name": "modelcontextprotocol",
@@ -198,10 +214,11 @@
     "github_about_section": "Specification and documentation for the Model Context Protocol",
     "homepage_link": "https://modelcontextprotocol.io",
     "github_topic_closest_fit": "mcp",
-    "contributors_all":
+    "contributors_all": 368,
     "contributors_2025": 301,
     "contributors_2024": 42,
-    "contributors_2023": 0
+    "contributors_2023": 0,
+    "contributors_2026_q1": 67
   },
   {
     "repo_name": "executorch",
@@ -210,10 +227,11 @@
     "github_about_section": "On-device AI across mobile, embedded and edge for PyTorch",
     "homepage_link": "https://executorch.ai",
     "github_topic_closest_fit": "inference",
-    "contributors_all":
+    "contributors_all": 503,
     "contributors_2025": 267,
     "contributors_2024": 243,
-    "contributors_2023": 77
+    "contributors_2023": 77,
+    "contributors_2026_q1": 136
   },
   {
     "repo_name": "numpy",
@@ -222,10 +240,11 @@
     "github_about_section": "The fundamental package for scientific computing with Python.",
     "homepage_link": "https://numpy.org",
     "github_topic_closest_fit": "scientific-computing",
-    "contributors_all":
+    "contributors_all": 2217,
     "contributors_2025": 237,
     "contributors_2024": 233,
-    "contributors_2023": 252
+    "contributors_2023": 252,
+    "contributors_2026_q1": 80
   },
   {
     "repo_name": "triton",
@@ -234,10 +253,11 @@
     "github_about_section": "Development repository for the Triton language and compiler",
     "homepage_link": "https://triton-lang.org",
     "github_topic_closest_fit": "parallel-programming",
-    "contributors_all":
+    "contributors_all": 562,
     "contributors_2025": 233,
     "contributors_2024": 206,
-    "contributors_2023": 159
+    "contributors_2023": 159,
+    "contributors_2026_q1": 105
   },
   {
     "repo_name": "modular",
@@ -246,10 +266,11 @@
     "github_about_section": "The Modular Platform (includes MAX & Mojo)",
     "homepage_link": "https://docs.modular.com",
     "github_topic_closest_fit": "parallel-programming",
-    "contributors_all":
+    "contributors_all": 419,
     "contributors_2025": 222,
     "contributors_2024": 205,
-    "contributors_2023": 99
+    "contributors_2023": 99,
+    "contributors_2026_q1": 149
   },
   {
     "repo_name": "scipy",
@@ -258,10 +279,11 @@
     "github_about_section": "SciPy library main repository",
     "homepage_link": "https://scipy.org",
     "github_topic_closest_fit": "scientific-computing",
-    "contributors_all":
+    "contributors_all": 2011,
     "contributors_2025": 213,
     "contributors_2024": 251,
-    "contributors_2023": 245
+    "contributors_2023": 245,
+    "contributors_2026_q1": 74
   },
   {
     "repo_name": "ollama",
@@ -270,10 +292,11 @@
     "github_about_section": "Get up and running with OpenAI gpt-oss, DeepSeek-R1, Gemma 3 and other models.",
     "homepage_link": "https://ollama.com",
     "github_topic_closest_fit": "inference",
-    "contributors_all":
+    "contributors_all": 599,
     "contributors_2025": 202,
     "contributors_2024": 314,
-    "contributors_2023": 97
+    "contributors_2023": 97,
+    "contributors_2026_q1": 40
   },
   {
     "repo_name": "trl",
@@ -282,10 +305,11 @@
     "github_about_section": "Train transformer language models with reinforcement learning.",
     "homepage_link": "http://hf.co/docs/trl",
     "github_topic_closest_fit": "reinforcement-learning",
-    "contributors_all":
+    "contributors_all": 474,
     "contributors_2025": 189,
     "contributors_2024": 154,
-    "contributors_2023": 122
+    "contributors_2023": 122,
+    "contributors_2026_q1": 59
   },
   {
     "repo_name": "flashinfer",
@@ -294,10 +318,11 @@
     "github_about_section": "FlashInfer: Kernel Library for LLM Serving",
     "homepage_link": "https://flashinfer.ai",
     "github_topic_closest_fit": "attention",
-    "contributors_all":
+    "contributors_all": 268,
     "contributors_2025": 158,
     "contributors_2024": 50,
-    "contributors_2023": 11
+    "contributors_2023": 11,
+    "contributors_2026_q1": 86
   },
   {
     "repo_name": "aiter",
@@ -305,10 +330,11 @@
     "category": "gpu kernels",
     "github_about_section": "AI Tensor Engine for ROCm",
     "homepage_link": "https://rocm.blogs.amd.com/software-tools-optimization/aiter-ai-tensor-engine/README.html",
-    "contributors_all":
+    "contributors_all": 227,
     "contributors_2025": 145,
     "contributors_2024": 10,
-    "contributors_2023": 0
+    "contributors_2023": 0,
+    "contributors_2026_q1": 117
   },
   {
     "repo_name": "LMCache",
@@ -316,10 +342,11 @@
     "category": "inference",
     "github_about_section": "Supercharge Your LLM with the Fastest KV Cache Layer",
     "homepage_link": "https://lmcache.ai",
-    "contributors_all":
+    "contributors_all": 175,
     "contributors_2025": 144,
     "contributors_2024": 18,
-    "contributors_2023": 0
+    "contributors_2023": 0,
+    "contributors_2026_q1": 42
   },
   {
     "repo_name": "Mooncake",
@@ -328,10 +355,11 @@
     "github_about_section": "Mooncake is the serving platform for Kimi, a leading LLM service provided by Moonshot AI.",
     "homepage_link": "https://kvcache-ai.github.io/Mooncake",
     "github_topic_closest_fit": "inference",
-    "contributors_all":
+    "contributors_all": 195,
     "contributors_2025": 133,
     "contributors_2024": 13,
-    "contributors_2023": 0
+    "contributors_2023": 0,
+    "contributors_2026_q1": 80
   },
   {
     "repo_name": "torchtitan",
@@ -339,10 +367,11 @@
     "category": "training framework",
     "github_about_section": "A PyTorch native platform for training generative AI models",
     "homepage_link": "https://arxiv.org/abs/2410.06511",
-    "contributors_all":
+    "contributors_all": 187,
     "contributors_2025": 119,
     "contributors_2024": 43,
-    "contributors_2023": 1
+    "contributors_2023": 1,
+    "contributors_2026_q1": 59
   },
   {
     "repo_name": "ao",
@@ -351,10 +380,11 @@
     "github_about_section": "PyTorch native quantization and sparsity for training and inference",
     "homepage_link": "https://pytorch.org/ao",
     "github_topic_closest_fit": "quantization",
-    "contributors_all":
+    "contributors_all": 219,
     "contributors_2025": 114,
     "contributors_2024": 100,
-    "contributors_2023": 5
+    "contributors_2023": 5,
+    "contributors_2026_q1": 67
   },
   {
     "repo_name": "ComfyUI",
@@ -363,10 +393,11 @@
     "github_about_section": "The most powerful and modular diffusion model GUI, api and backend with a graph/nodes interface.",
     "homepage_link": "https://comfy.org",
     "github_topic_closest_fit": "stable-diffusion",
-    "contributors_all":
+    "contributors_all": 312,
     "contributors_2025": 108,
     "contributors_2024": 119,
-    "contributors_2023": 94
+    "contributors_2023": 94,
+    "contributors_2026_q1": 60
   },
   {
     "repo_name": "unsloth",
@@ -375,10 +406,11 @@
     "github_about_section": "Fine-tuning & Reinforcement Learning for LLMs. Train OpenAI gpt-oss, DeepSeek-R1, Qwen3, Gemma 3, TTS 2x faster with 70% less VRAM.",
     "homepage_link": "https://docs.unsloth.ai",
     "github_topic_closest_fit": "fine-tuning",
-    "contributors_all":
+    "contributors_all": 186,
     "contributors_2025": 108,
     "contributors_2024": 29,
-    "contributors_2023": 3
+    "contributors_2023": 3,
+    "contributors_2026_q1": 55
   },
   {
     "repo_name": "accelerate",
@@ -386,10 +418,11 @@
     "category": "training framework",
     "github_about_section": "A simple way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support.",
     "homepage_link": "https://huggingface.co/docs/accelerate",
-    "contributors_all":
+    "contributors_all": 415,
     "contributors_2025": 97,
     "contributors_2024": 124,
-    "contributors_2023": 149
+    "contributors_2023": 149,
+    "contributors_2026_q1": 25
   },
   {
     "repo_name": "terminal-bench",
@@ -401,7 +434,8 @@
     "contributors_all": 96,
     "contributors_2025": 96,
     "contributors_2024": 0,
-    "contributors_2023": 0
+    "contributors_2023": 0,
+    "contributors_2026_q1": 2
   },
   {
     "repo_name": "DeepSpeed",
@@ -409,10 +443,11 @@
     "category": "training framework",
     "github_about_section": "DeepSpeed is a deep learning optimization library that makes distributed training and inference easy, efficient, and effective.",
     "homepage_link": "https://deepspeed.ai",
-    "contributors_all":
+    "contributors_all": 460,
     "contributors_2025": 96,
     "contributors_2024": 134,
-    "contributors_2023": 165
+    "contributors_2023": 165,
+    "contributors_2026_q1": 30
   },
   {
     "repo_name": "milvus",
@@ -421,10 +456,11 @@
     "github_about_section": "Milvus is a high-performance, cloud-native vector database built for scalable vector ANN search",
     "homepage_link": "https://milvus.io",
     "github_topic_closest_fit": "vector-search",
-    "contributors_all":
+    "contributors_all": 399,
     "contributors_2025": 95,
     "contributors_2024": 84,
-    "contributors_2023": 72
+    "contributors_2023": 72,
+    "contributors_2026_q1": 49
   },
   {
     "repo_name": "cutlass",
@@ -433,10 +469,11 @@
     "github_about_section": "CUDA Templates and Python DSLs for High-Performance Linear Algebra",
     "homepage_link": "https://docs.nvidia.com/cutlass/index.html",
     "github_topic_closest_fit": "parallel-programming",
-    "contributors_all":
+    "contributors_all": 266,
     "contributors_2025": 94,
     "contributors_2024": 64,
-    "contributors_2023": 66
+    "contributors_2023": 66,
+    "contributors_2026_q1": 39
   },
   {
     "repo_name": "tilelang",
@@ -445,10 +482,11 @@
     "github_about_section": "Domain-specific language designed to streamline the development of high-performance GPU/CPU/Accelerators kernels",
     "homepage_link": "https://tilelang.com",
     "github_topic_closest_fit": "parallel-programming",
-    "contributors_all":
+    "contributors_all": 121,
     "contributors_2025": 89,
     "contributors_2024": 1,
-    "contributors_2023": 0
+    "contributors_2023": 0,
+    "contributors_2026_q1": 50
   },
   {
     "repo_name": "monarch",
@@ -456,10 +494,11 @@
     "category": "distributed computing",
     "github_about_section": "PyTorch Single Controller",
     "homepage_link": "https://meta-pytorch.org/monarch",
-    "contributors_all":
     "contributors_2025": 85,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "Liger-Kernel",
@@ -468,10 +507,11 @@
     "github_about_section": "Efficient Triton Kernels for LLM Training",
     "homepage_link": "https://openreview.net/pdf?id=36SjAIT42G",
     "github_topic_closest_fit": "triton",
-    "contributors_all":
     "contributors_2025": 78,
     "contributors_2024": 61,
-    "contributors_2023": 0
   },
   {
     "repo_name": "hipBLASLt",
@@ -483,7 +523,8 @@
     "contributors_all": 111,
     "contributors_2025": 69,
     "contributors_2024": 70,
-    "contributors_2023": 35
   },
   {
     "repo_name": "peft",
@@ -491,10 +532,11 @@
     "category": "fine tuning",
     "github_about_section": "PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.",
     "homepage_link": "https://huggingface.co/docs/peft",
-    "contributors_all":
     "contributors_2025": 69,
     "contributors_2024": 111,
-    "contributors_2023": 115
   },
   {
     "repo_name": "ROCm",
@@ -502,10 +544,11 @@
     "category": "multi-purpose library",
     "github_about_section": "AMD ROCm Software - GitHub Home",
     "homepage_link": "https://rocm.docs.amd.com",
-    "contributors_all":
     "contributors_2025": 67,
     "contributors_2024": 61,
-    "contributors_2023": 44
   },
   {
     "repo_name": "mcp-agent",
@@ -516,7 +559,8 @@
     "contributors_all": 64,
     "contributors_2025": 63,
     "contributors_2024": 1,
-    "contributors_2023": 0
   },
   {
     "repo_name": "onnx",
@@ -525,10 +569,11 @@
     "github_about_section": "Open standard for machine learning interoperability",
     "homepage_link": "https://onnx.ai",
     "github_topic_closest_fit": "onnx",
-    "contributors_all":
     "contributors_2025": 56,
     "contributors_2024": 45,
-    "contributors_2023": 61
   },
   {
     "repo_name": "letta",
@@ -540,7 +585,8 @@
     "contributors_all": 159,
     "contributors_2025": 57,
     "contributors_2024": 75,
-    "contributors_2023": 47
   },
   {
     "repo_name": "helion",
@@ -549,10 +595,11 @@
     "github_about_section": "A Python-embedded DSL that makes it easy to write fast, scalable ML kernels with minimal boilerplate.",
     "homepage_link": "https://helionlang.com",
     "github_topic_closest_fit": "parallel-programming",
-    "contributors_all":
     "contributors_2025": 49,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "openevolve",
@@ -563,7 +610,8 @@
     "contributors_all": 51,
     "contributors_2025": 46,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "lightning-thunder",
@@ -573,7 +621,8 @@
     "contributors_all": 79,
     "contributors_2025": 44,
     "contributors_2024": 47,
-    "contributors_2023": 29
   },
   {
     "repo_name": "truss",
@@ -585,7 +634,8 @@
     "contributors_all": 84,
     "contributors_2025": 44,
     "contributors_2024": 30,
-    "contributors_2023": 21
   },
   {
     "repo_name": "cuda-python",
@@ -597,7 +647,8 @@
     "contributors_all": 54,
     "contributors_2025": 41,
     "contributors_2024": 12,
-    "contributors_2023": 1
   },
   {
     "repo_name": "warp",
@@ -606,10 +657,11 @@
     "github_about_section": "A Python framework for accelerated simulation, data generation and spatial computing.",
     "homepage_link": "https://nvidia.github.io/warp",
     "github_topic_closest_fit": "physics-simulation",
-    "contributors_all":
     "contributors_2025": 40,
     "contributors_2024": 29,
-    "contributors_2023": 17
   },
   {
     "repo_name": "metaflow",
@@ -620,7 +672,8 @@
     "contributors_all": 132,
     "contributors_2025": 37,
     "contributors_2024": 35,
-    "contributors_2023": 28
   },
   {
     "repo_name": "numba",
@@ -628,10 +681,11 @@
     "category": "compiler",
     "github_about_section": "NumPy aware dynamic Python compiler using LLVM",
     "homepage_link": "https://numba.pydata.org",
-    "contributors_all":
     "contributors_2025": 40,
     "contributors_2024": 32,
-    "contributors_2023": 55
   },
   {
     "repo_name": "SWE-bench",
@@ -643,7 +697,8 @@
     "contributors_all": 66,
     "contributors_2025": 33,
     "contributors_2024": 37,
-    "contributors_2023": 9
   },
   {
     "repo_name": "Triton-distributed",
@@ -651,10 +706,11 @@
     "category": "distributed computing",
     "github_about_section": "Distributed Compiler based on Triton for Parallel Systems",
     "homepage_link": "https://triton-distributed.readthedocs.io",
-    "contributors_all":
     "contributors_2025": 30,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "ThunderKittens",
@@ -666,7 +722,8 @@
     "contributors_all": 37,
     "contributors_2025": 29,
     "contributors_2024": 13,
-    "contributors_2023": 0
   },
   {
     "repo_name": "dstack",
@@ -678,7 +735,8 @@
     "contributors_all": 69,
     "contributors_2025": 28,
     "contributors_2024": 42,
-    "contributors_2023": 14
   },
   {
     "repo_name": "ome",
@@ -690,7 +748,8 @@
     "contributors_all": 31,
     "contributors_2025": 28,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "server",
@@ -699,10 +758,11 @@
     "github_about_section": "The Triton Inference Server provides an optimized cloud and edge inferencing solution.",
     "homepage_link": "https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/index.html",
     "github_topic_closest_fit": "inference",
-    "contributors_all":
     "contributors_2025": 24,
     "contributors_2024": 36,
-    "contributors_2023": 34
   },
   {
     "repo_name": "ccache",
@@ -710,10 +770,11 @@
     "category": "compiler",
     "github_about_section": "ccache - a fast compiler cache",
     "homepage_link": "https://ccache.dev",
-    "contributors_all":
     "contributors_2025": 20,
     "contributors_2024": 28,
-    "contributors_2023": 22
   },
   {
     "repo_name": "lapack",
@@ -725,17 +786,19 @@
     "contributors_all": 187,
     "contributors_2025": 23,
     "contributors_2024": 25,
-    "contributors_2023": 42
   },
   {
     "repo_name": "quack",
     "repo_link": "https://github.com/Dao-AILab/quack",
     "category": "kernel examples",
     "github_about_section": "A Quirky Assortment of CuTe Kernels",
-    "contributors_all":
     "contributors_2025": 17,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "KernelBench",
@@ -747,7 +810,8 @@
     "contributors_all": 21,
     "contributors_2025": 16,
     "contributors_2024": 3,
-    "contributors_2023": 0
   },
   {
     "repo_name": "reference-kernels",
@@ -755,10 +819,11 @@
     "category": "kernel examples",
     "github_about_section": "Official Problem Sets / Reference Kernels for the GPU MODE Leaderboard!",
     "homepage_link": "https://gpumode.com",
-    "contributors_all":
     "contributors_2025": 16,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "synthetic-data-kit",
@@ -770,7 +835,8 @@
     "contributors_all": 15,
     "contributors_2025": 15,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "tritonparse",
@@ -778,20 +844,22 @@
     "category": "performance testing",
     "github_about_section": "TritonParse: A Compiler Tracer, Visualizer, and Reproducer for Triton Kernels",
     "homepage_link": "https://meta-pytorch.org/tritonparse",
-    "contributors_all":
     "contributors_2025": 15,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "kernels",
     "repo_link": "https://github.com/huggingface/kernels",
     "category": "gpu kernels",
     "github_about_section": "Load compute kernels from the Hub",
-    "contributors_all":
     "contributors_2025": 14,
     "contributors_2024": 2,
-    "contributors_2023": 0
   },
   {
     "repo_name": "Wan2.2",
@@ -803,7 +871,8 @@
     "contributors_all": 16,
     "contributors_2025": 14,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "Primus-Turbo",
@@ -813,7 +882,8 @@
     "contributors_all": 14,
     "contributors_2025": 12,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "flashinfer-bench",
@@ -822,10 +892,11 @@
     "github_about_section": "Building the Virtuous Cycle for AI-driven LLM Systems",
     "homepage_link": "https://bench.flashinfer.ai",
     "github_topic_closest_fit": "benchmark",
-    "contributors_all":
     "contributors_2025": 11,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "FTorch",
@@ -837,7 +908,8 @@
     "contributors_all": 22,
     "contributors_2025": 12,
     "contributors_2024": 8,
-    "contributors_2023": 9
   },
   {
     "repo_name": "TensorRT",
@@ -848,7 +920,8 @@
     "contributors_all": 104,
     "contributors_2025": 10,
     "contributors_2024": 18,
-    "contributors_2023": 19
   },
   {
     "repo_name": "TileIR",
@@ -859,7 +932,8 @@
     "contributors_all": 10,
     "contributors_2025": 10,
     "contributors_2024": 1,
-    "contributors_2023": 0
   },
   {
     "repo_name": "kernels-community",
@@ -867,10 +941,11 @@
     "category": "gpu kernels",
     "homepage_link": "https://huggingface.co/kernels-community",
     "github_about_section": "Kernel sources for https://huggingface.co/kernels-community",
-    "contributors_all":
     "contributors_2025": 9,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "GEAK-agent",
@@ -878,10 +953,11 @@
     "category": "agent",
     "github_about_section": "It is an LLM-based AI agent, which can write correct and efficient gpu kernels automatically.",
     "github_topic_closest_fit": "ai-agents",
-    "contributors_all":
     "contributors_2025": 9,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "intelliperf",
@@ -893,7 +969,8 @@
     "contributors_all": 7,
     "contributors_2025": 7,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "cudnn-frontend",
@@ -905,7 +982,8 @@
     "contributors_all": 14,
     "contributors_2025": 6,
     "contributors_2024": 5,
-    "contributors_2023": 1
   },
   {
     "repo_name": "BitBLAS",
@@ -916,7 +994,8 @@
     "contributors_all": 17,
     "contributors_2025": 5,
     "contributors_2024": 14,
-    "contributors_2023": 0
   },
   {
     "repo_name": "Self-Forcing",
@@ -928,7 +1007,8 @@
     "contributors_all": 4,
     "contributors_2025": 4,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "TritonBench",
@@ -940,7 +1020,8 @@
     "contributors_all": 3,
     "contributors_2025": 3,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "hatchet",
@@ -952,7 +1033,8 @@
     "contributors_all": 25,
     "contributors_2025": 3,
     "contributors_2024": 6,
-    "contributors_2023": 8
   },
   {
     "repo_name": "streamv2v",
@@ -964,7 +1046,8 @@
     "contributors_all": 7,
     "contributors_2025": 3,
     "contributors_2024": 6,
-    "contributors_2023": 0
   },
   {
     "repo_name": "mistral-inference",
@@ -976,7 +1059,8 @@
     "contributors_all": 30,
     "contributors_2025": 2,
     "contributors_2024": 17,
-    "contributors_2023": 14
   },
   {
     "repo_name": "omnitrace",
@@ -988,7 +1072,8 @@
     "contributors_all": 16,
     "contributors_2025": 2,
     "contributors_2024": 12,
-    "contributors_2023": 2
   },
   {
     "repo_name": "IMO2025",
@@ -1000,7 +1085,8 @@
     "contributors_all": 2,
     "contributors_2025": 2,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "RaBitQ",
@@ -1012,7 +1098,8 @@
     "contributors_all": 2,
     "contributors_2025": 2,
     "contributors_2024": 1,
-    "contributors_2023": 0
   },
   {
     "repo_name": "torchdendrite",
@@ -1022,7 +1109,8 @@
     "contributors_all": 2,
     "contributors_2025": 1,
     "contributors_2024": 1,
-    "contributors_2023": 0
   },
   {
     "repo_name": "triton-runner",
@@ -1033,7 +1121,8 @@
     "contributors_all": 2,
     "contributors_2025": 1,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "triSYCL",
@@ -1045,7 +1134,8 @@
     "contributors_all": 31,
     "contributors_2025": 0,
     "contributors_2024": 1,
-    "contributors_2023": 3
   },
   {
     "repo_name": "StreamDiffusion",
@@ -1057,7 +1147,8 @@
     "contributors_all": 29,
     "contributors_2025": 0,
     "contributors_2024": 9,
-    "contributors_2023": 25
   },
   {
     "repo_name": "wandb",
@@ -1065,10 +1156,11 @@
     "category": "ml visualization",
     "github_about_section": "The AI developer platform. Use Weights & Biases to train and fine-tune models, and manage models from experimentation to production.",
     "homepage_link": "https://wandb.ai",
-    "contributors_all":
     "contributors_2025": 46,
     "contributors_2024": 67,
-    "contributors_2023": 62
   },
   {
     "repo_name": "aws-neuron-sdk",
@@ -1076,10 +1168,11 @@
     "category": "sdk",
     "github_about_section": "Powering AWS purpose-built machine learning chips. Blazing fast and cost effective, natively integrated into PyTorch and TensorFlow and integrated with your favorite AWS services",
     "homepage_link": "https://aws.amazon.com/ai/machine-learning/neuron",
-    "contributors_all":
     "contributors_2025": 33,
     "contributors_2024": 37,
-    "contributors_2023": 32
   },
   {
     "repo_name": "onnxruntime",
@@ -1087,10 +1180,11 @@
     "category": "machine learning interoperability",
     "github_about_section": "ONNX Runtime: cross-platform, high performance ML inferencing and training accelerator",
     "homepage_link": "https://onnxruntime.ai",
-    "contributors_all":
     "contributors_2025": 237,
     "contributors_2024": 213,
-    "contributors_2023": 213
   },
   {
     "repo_name": "ort",
@@ -1101,7 +1195,8 @@
     "contributors_all": 70,
     "contributors_2025": 25,
     "contributors_2024": 20,
-    "contributors_2023": 21
   },
   {
     "repo_name": "Triton-distributed",
@@ -1109,10 +1204,11 @@
     "category": "distributed computing",
     "github_about_section": "Distributed Compiler based on Triton for Parallel Systems",
     "homepage_link": "https://triton-distributed.readthedocs.io",
-    "contributors_all":
     "contributors_2025": 30,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "gemlite",
@@ -1122,7 +1218,8 @@
     "contributors_all": 5,
     "contributors_2025": 1,
     "contributors_2024": 5,
-    "contributors_2023": 0
   },
   {
     "repo_name": "cutile-python",
@@ -1130,10 +1227,11 @@
     "category": "parallel computing",
     "github_about_section": "cuTile is a programming model for writing parallel kernels for NVIDIA GPUs",
     "homepage_link": "https://docs.nvidia.com/cuda/cutile-python",
-    "contributors_all":
     "contributors_2025": 10,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "tilus",
@@ -1141,10 +1239,11 @@
     "category": "parallel computing",
     "github_about_section": "Tilus is a tile-level kernel programming language with explicit control over shared memory and registers.",
     "homepage_link": "https://nvidia.github.io/tilus",
-    "contributors_all":
     "contributors_2025": 4,
     "contributors_2024": 0,
-    "contributors_2023": 0
   },
   {
     "repo_name": "triton-windows",
@@ -1154,111 +1253,193 @@
     "contributors_all": 537,
     "contributors_2025": 233,
     "contributors_2024": 207,
-    "contributors_2023": 159
   },
   {
     "repo_name": "flash-linear-attention",
     "repo_link": "https://github.com/fla-org/flash-linear-attention",
     "category": "gpu kernels",
     "github_about_section": "Efficient implementations of state-of-the-art linear attention models",
-    "contributors_all":
     "contributors_2025": 64,
     "contributors_2024": 22,
-    "contributors_2023": 3
   },
   {
     "repo_name": "nccl",
     "repo_link": "https://github.com/NVIDIA/nccl",
     "category": "distributed computing",
     "github_about_section": "Optimized primitives for collective multi-GPU communication",
-    "homepage_link": "https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html"
   },
   {
     "repo_name": "kraken",
     "repo_link": "https://github.com/meta-pytorch/kraken",
-    "github_about_section": "Triton-based Symmetric Memory operators and examples"
   },
   {
     "repo_name": "nvshmem",
     "repo_link": "https://github.com/NVIDIA/nvshmem",
     "github_about_section": "NVIDIA NVSHMEM is a parallel programming interface for NVIDIA GPUs based on OpenSHMEM. NVSHMEM can significantly reduce multi-process communication and coordination overheads by allowing programmers to perform one-sided communication from within CUDA kernels and on CUDA streams.",
-    "homepage_link": "https://docs.nvidia.com/nvshmem/api/index.html"
   },
   {
     "repo_name": "OLMo",
     "repo_link": "https://github.com/allenai/OLMo",
     "github_about_section": "Modeling, training, eval, and inference code for OLMo",
-    "homepage_link": "https://allenai.org/olmo"
   },
   {
     "repo_name": "kernelbot",
     "repo_link": "https://github.com/gpu-mode/kernelbot",
     "github_about_section": "Write a fast kernel and see how you compare against the best humans and AI on gpumode.com",
-    "homepage_link": "https://www.gpumode.com"
   },
   {
     "repo_name": "openzl",
     "repo_link": "https://github.com/facebook/openzl",
     "github_about_section": "A novel data compression framework",
-    "homepage_link": "https://openzl.org"
   },
   {
     "repo_name": "torchforge",
     "repo_link": "https://github.com/meta-pytorch/torchforge",
     "github_about_section": "PyTorch-native post-training at scale",
-    "homepage_link": "https://meta-pytorch.org/torchforge"
   },
   {
     "repo_name": "open-instruct",
     "repo_link": "https://github.com/allenai/open-instruct",
     "github_about_section": "AllenAI's post-training codebase",
-    "homepage_link": "https://allenai.github.io/open-instruct/"
   },
   {
     "repo_name": "prime-rl",
     "repo_link": "https://github.com/PrimeIntellect-ai/prime-rl",
-    "github_about_section": "Agentic RL Training at Scale"
   },
   {
     "repo_name": "SkyRL",
     "repo_link": "https://github.com/NovaSky-AI/SkyRL",
     "github_about_section": "SkyRL: A Modular Full-stack RL Library for LLMs",
-    "homepage_link": "https://docs.skyrl.ai/docs"
   },
   {
     "repo_name": "OpenRLHF",
     "repo_link": "https://github.com/OpenRLHF/OpenRLHF",
     "github_about_section": "An Easy-to-use, Scalable and High-performance Agentic RL Framework based on Ray (PPO & DAPO & REINFORCE++ & VLM & TIS & vLLM & Ray & Async RL)",
-    "homepage_link": "https://openrlhf.readthedocs.io"
   },
   {
     "repo_name": "PipelineRL",
     "repo_link": "https://github.com/ServiceNow/PipelineRL",
     "github_about_section": "A scalable asynchronous reinforcement learning implementation with in-flight weight updates.",
-    "homepage_link": "https://arxiv.org/abs/2509.19128"
   },
   {
     "repo_name": "cosmos-predict2.5",
     "repo_link": "https://github.com/nvidia-cosmos/cosmos-predict2.5",
     "github_about_section": "Cosmos-Predict2.5, the latest version of the Cosmos World Foundation Models (WFMs) family, specialized for simulating and predicting the future state of the world in the form of video.",
-    "homepage_link": "https://research.nvidia.com/labs/cosmos-lab/cosmos-predict2.5"
   },
   {
     "repo_name": "AReal",
     "repo_link": "https://github.com/inclusionAI/AReaL",
     "github_about_section": "The RL Bridge for LLM-based Agent Applications. Made Simple & Flexible.",
-    "homepage_link": "https://www.inclusion-ai.org/AReaL"
   },
   {
     "repo_name": "RLinf",
     "repo_link": "https://github.com/RLinf/RLinf",
     "github_about_section": "RLinf: Reinforcement Learning Infrastructure for Embodied and Agentic AI",
-    "homepage_link": "https://rlinf.readthedocs.io"
   },
   {
     "repo_name": "ROLL",
     "repo_link": "https://github.com/alibaba/ROLL",
     "github_about_section": "An Efficient and User-Friendly Scaling Library for Reinforcement Learning with Large Language Models",
-    "homepage_link": "https://alibaba.github.io/ROLL/"
   }
 ]
|
| 497 |
+
"contributors_all": 103,
|
| 498 |
"contributors_2025": 85,
|
| 499 |
"contributors_2024": 0,
|
| 500 |
+
"contributors_2023": 0,
|
| 501 |
+
"contributors_2026_q1": 45
|
| 502 |
},
|
| 503 |
{
|
| 504 |
"repo_name": "Liger-Kernel",
|
|
|
|
| 507 |
"github_about_section": "Efficient Triton Kernels for LLM Training",
|
| 508 |
"homepage_link": "https://openreview.net/pdf?id=36SjAIT42G",
|
| 509 |
"github_topic_closest_fit": "triton",
|
| 510 |
+
"contributors_all": 140,
|
| 511 |
"contributors_2025": 78,
|
| 512 |
"contributors_2024": 61,
|
| 513 |
+
"contributors_2023": 0,
|
| 514 |
+
"contributors_2026_q1": 31
|
| 515 |
},
|
| 516 |
{
|
| 517 |
"repo_name": "hipBLASLt",
|
|
|
|
| 523 |
"contributors_all": 111,
|
| 524 |
"contributors_2025": 69,
|
| 525 |
"contributors_2024": 70,
|
| 526 |
+
"contributors_2023": 35,
|
| 527 |
+
"contributors_2026_q1": 0
|
| 528 |
},
|
| 529 |
{
|
| 530 |
"repo_name": "peft",
|
|
|
|
| 532 |
"category": "fine tuning",
|
| 533 |
"github_about_section": "PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.",
|
| 534 |
"homepage_link": "https://huggingface.co/docs/peft",
|
| 535 |
+
"contributors_all": 292,
|
| 536 |
"contributors_2025": 69,
|
| 537 |
"contributors_2024": 111,
|
| 538 |
+
"contributors_2023": 115,
|
| 539 |
+
"contributors_2026_q1": 25
|
| 540 |
},
|
| 541 |
{
|
| 542 |
"repo_name": "ROCm",
|
|
|
|
| 544 |
"category": "multi-purpose library",
|
| 545 |
"github_about_section": "AMD ROCm Software - GitHub Home",
|
| 546 |
"homepage_link": "https://rocm.docs.amd.com",
|
| 547 |
+
"contributors_all": 168,
|
| 548 |
"contributors_2025": 67,
|
| 549 |
"contributors_2024": 61,
|
| 550 |
+
"contributors_2023": 44,
|
| 551 |
+
"contributors_2026_q1": 25
|
| 552 |
},
|
| 553 |
{
|
| 554 |
"repo_name": "mcp-agent",
|
|
|
|
| 559 |
"contributors_all": 64,
|
| 560 |
"contributors_2025": 63,
|
| 561 |
"contributors_2024": 1,
|
| 562 |
+
"contributors_2023": 0,
|
| 563 |
+
"contributors_2026_q1": 1
|
| 564 |
},
|
| 565 |
{
|
| 566 |
"repo_name": "onnx",
|
|
|
|
| 569 |
"github_about_section": "Open standard for machine learning interoperability",
|
| 570 |
"homepage_link": "https://onnx.ai",
|
| 571 |
"github_topic_closest_fit": "onnx",
|
| 572 |
+
"contributors_all": 382,
|
| 573 |
"contributors_2025": 56,
|
| 574 |
"contributors_2024": 45,
|
| 575 |
+
"contributors_2023": 61,
|
| 576 |
+
"contributors_2026_q1": 21
|
| 577 |
},
|
| 578 |
{
|
| 579 |
"repo_name": "letta",
|
|
|
|
| 585 |
"contributors_all": 159,
|
| 586 |
"contributors_2025": 57,
|
| 587 |
"contributors_2024": 75,
|
| 588 |
+
"contributors_2023": 47,
|
| 589 |
+
"contributors_2026_q1": 16
|
| 590 |
},
|
| 591 |
{
|
| 592 |
"repo_name": "helion",
|
|
|
|
| 595 |
"github_about_section": "A Python-embedded DSL that makes it easy to write fast, scalable ML kernels with minimal boilerplate.",
|
| 596 |
"homepage_link": "https://helionlang.com",
|
| 597 |
"github_topic_closest_fit": "parallel-programming",
|
| 598 |
+
"contributors_all": 70,
|
| 599 |
"contributors_2025": 49,
|
| 600 |
"contributors_2024": 0,
|
| 601 |
+
"contributors_2023": 0,
|
| 602 |
+
"contributors_2026_q1": 41
|
| 603 |
},
|
| 604 |
{
|
| 605 |
"repo_name": "openevolve",
|
|
|
|
| 610 |
"contributors_all": 51,
|
| 611 |
"contributors_2025": 46,
|
| 612 |
"contributors_2024": 0,
|
| 613 |
+
"contributors_2023": 0,
|
| 614 |
+
"contributors_2026_q1": 7
|
| 615 |
},
|
| 616 |
{
|
| 617 |
"repo_name": "lightning-thunder",
|
|
|
|
| 621 |
"contributors_all": 79,
|
| 622 |
"contributors_2025": 44,
|
| 623 |
"contributors_2024": 47,
|
| 624 |
+
"contributors_2023": 29,
|
| 625 |
+
"contributors_2026_q1": 7
|
| 626 |
},
|
| 627 |
{
|
| 628 |
"repo_name": "truss",
|
|
|
|
| 634 |
"contributors_all": 84,
|
| 635 |
"contributors_2025": 44,
|
| 636 |
"contributors_2024": 30,
|
| 637 |
+
"contributors_2023": 21,
|
| 638 |
+
"contributors_2026_q1": 30
|
| 639 |
},
|
| 640 |
{
|
| 641 |
"repo_name": "cuda-python",
|
|
|
|
| 647 |
"contributors_all": 54,
|
| 648 |
"contributors_2025": 41,
|
| 649 |
"contributors_2024": 12,
|
| 650 |
+
"contributors_2023": 1,
|
| 651 |
+
"contributors_2026_q1": 16
|
| 652 |
},
|
| 653 |
{
|
| 654 |
"repo_name": "warp",
|
|
|
|
| 657 |
"github_about_section": "A Python framework for accelerated simulation, data generation and spatial computing.",
|
| 658 |
"homepage_link": "https://nvidia.github.io/warp",
|
| 659 |
"github_topic_closest_fit": "physics-simulation",
|
| 660 |
+
"contributors_all": 90,
|
| 661 |
"contributors_2025": 40,
|
| 662 |
"contributors_2024": 29,
|
| 663 |
+
"contributors_2023": 17,
|
| 664 |
+
"contributors_2026_q1": 24
|
| 665 |
},
|
| 666 |
{
|
| 667 |
"repo_name": "metaflow",
|
|
|
|
| 672 |
"contributors_all": 132,
|
| 673 |
"contributors_2025": 37,
|
| 674 |
"contributors_2024": 35,
|
| 675 |
+
"contributors_2023": 28,
|
| 676 |
+
"contributors_2026_q1": 23
|
| 677 |
},
|
| 678 |
{
|
| 679 |
"repo_name": "numba",
|
|
|
|
| 681 |
"category": "compiler",
|
| 682 |
"github_about_section": "NumPy aware dynamic Python compiler using LLVM",
|
| 683 |
"homepage_link": "https://numba.pydata.org",
|
| 684 |
+
"contributors_all": 449,
|
| 685 |
"contributors_2025": 40,
|
| 686 |
"contributors_2024": 32,
|
| 687 |
+
"contributors_2023": 55,
|
| 688 |
+
"contributors_2026_q1": 26
|
| 689 |
},
|
| 690 |
{
|
| 691 |
"repo_name": "SWE-bench",
|
|
|
|
| 697 |
"contributors_all": 66,
|
| 698 |
"contributors_2025": 33,
|
| 699 |
"contributors_2024": 37,
|
| 700 |
+
"contributors_2023": 9,
|
| 701 |
+
"contributors_2026_q1": 2
|
| 702 |
},
|
| 703 |
{
|
| 704 |
"repo_name": "Triton-distributed",
|
|
|
|
| 706 |
"category": "distributed computing",
|
| 707 |
"github_about_section": "Distributed Compiler based on Triton for Parallel Systems",
|
| 708 |
"homepage_link": "https://triton-distributed.readthedocs.io",
|
| 709 |
+
"contributors_all": 37,
|
| 710 |
"contributors_2025": 30,
|
| 711 |
"contributors_2024": 0,
|
| 712 |
+
"contributors_2023": 0,
|
| 713 |
+
"contributors_2026_q1": 11
|
| 714 |
},
|
| 715 |
{
|
| 716 |
"repo_name": "ThunderKittens",
|
|
|
|
| 722 |
"contributors_all": 37,
|
| 723 |
"contributors_2025": 29,
|
| 724 |
"contributors_2024": 13,
|
| 725 |
+
"contributors_2023": 0,
|
| 726 |
+
"contributors_2026_q1": 6
|
| 727 |
},
|
| 728 |
{
|
| 729 |
"repo_name": "dstack",
|
|
|
|
| 735 |
"contributors_all": 69,
|
| 736 |
"contributors_2025": 28,
|
| 737 |
"contributors_2024": 42,
|
| 738 |
+
"contributors_2023": 14,
|
| 739 |
+
"contributors_2026_q1": 9
|
| 740 |
},
|
| 741 |
{
|
| 742 |
"repo_name": "ome",
|
|
|
|
| 748 |
"contributors_all": 31,
|
| 749 |
"contributors_2025": 28,
|
| 750 |
"contributors_2024": 0,
|
| 751 |
+
"contributors_2023": 0,
|
| 752 |
+
"contributors_2026_q1": 13
|
| 753 |
},
|
| 754 |
{
|
| 755 |
"repo_name": "server",
|
|
|
|
| 758 |
"github_about_section": "The Triton Inference Server provides an optimized cloud and edge inferencing solution.",
|
| 759 |
"homepage_link": "https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/index.html",
|
| 760 |
"github_topic_closest_fit": "inference",
|
| 761 |
+
"contributors_all": 150,
|
| 762 |
"contributors_2025": 24,
|
| 763 |
"contributors_2024": 36,
|
| 764 |
+
"contributors_2023": 34,
|
| 765 |
+
"contributors_2026_q1": 7
|
| 766 |
},
|
| 767 |
{
|
| 768 |
"repo_name": "ccache",
|
|
|
|
| 770 |
"category": "compiler",
|
| 771 |
"github_about_section": "ccache - a fast compiler cache",
|
| 772 |
"homepage_link": "https://ccache.dev",
|
| 773 |
+
"contributors_all": 225,
|
| 774 |
"contributors_2025": 20,
|
| 775 |
"contributors_2024": 28,
|
| 776 |
+
"contributors_2023": 22,
|
| 777 |
+
"contributors_2026_q1": 10
|
| 778 |
},
|
| 779 |
{
|
| 780 |
"repo_name": "lapack",
|
|
|
|
| 786 |
"contributors_all": 187,
|
| 787 |
"contributors_2025": 23,
|
| 788 |
"contributors_2024": 25,
|
| 789 |
+
"contributors_2023": 42,
|
| 790 |
+
"contributors_2026_q1": 11
|
| 791 |
},
|
| 792 |
{
|
| 793 |
"repo_name": "quack",
|
| 794 |
"repo_link": "https://github.com/Dao-AILab/quack",
|
| 795 |
"category": "kernel examples",
|
| 796 |
"github_about_section": "A Quirky Assortment of CuTe Kernels",
|
| 797 |
+
"contributors_all": 35,
|
| 798 |
"contributors_2025": 17,
|
| 799 |
"contributors_2024": 0,
|
| 800 |
+
"contributors_2023": 0,
|
| 801 |
+
"contributors_2026_q1": 14
|
| 802 |
},
|
| 803 |
{
|
| 804 |
"repo_name": "KernelBench",
|
|
|
|
| 810 |
"contributors_all": 21,
|
| 811 |
"contributors_2025": 16,
|
| 812 |
"contributors_2024": 3,
|
| 813 |
+
"contributors_2023": 0,
|
| 814 |
+
"contributors_2026_q1": 6
|
| 815 |
},
|
| 816 |
{
|
| 817 |
"repo_name": "reference-kernels",
|
|
|
|
| 819 |
"category": "kernel examples",
|
| 820 |
"github_about_section": "Official Problem Sets / Reference Kernels for the GPU MODE Leaderboard!",
|
| 821 |
"homepage_link": "https://gpumode.com",
|
| 822 |
+
"contributors_all": 24,
|
| 823 |
"contributors_2025": 16,
|
| 824 |
"contributors_2024": 0,
|
| 825 |
+
"contributors_2023": 0,
|
| 826 |
+
"contributors_2026_q1": 13
|
| 827 |
},
|
| 828 |
{
|
| 829 |
"repo_name": "synthetic-data-kit",
|
|
|
|
| 835 |
"contributors_all": 15,
|
| 836 |
"contributors_2025": 15,
|
| 837 |
"contributors_2024": 0,
|
| 838 |
+
"contributors_2023": 0,
|
| 839 |
+
"contributors_2026_q1": 0
|
| 840 |
},
|
| 841 |
{
|
| 842 |
"repo_name": "tritonparse",
|
|
|
|
| 844 |
"category": "performance testing",
|
| 845 |
"github_about_section": "TritonParse: A Compiler Tracer, Visualizer, and Reproducer for Triton Kernels",
|
| 846 |
"homepage_link": "https://meta-pytorch.org/tritonparse",
|
| 847 |
+
"contributors_all": 27,
|
| 848 |
"contributors_2025": 15,
|
| 849 |
"contributors_2024": 0,
|
| 850 |
+
"contributors_2023": 0,
|
| 851 |
+
"contributors_2026_q1": 15
|
| 852 |
},
|
| 853 |
{
|
| 854 |
"repo_name": "kernels",
|
| 855 |
"repo_link": "https://github.com/huggingface/kernels",
|
| 856 |
"category": "gpu kernels",
|
| 857 |
"github_about_section": "Load compute kernels from the Hub",
|
| 858 |
+
"contributors_all": 27,
|
| 859 |
"contributors_2025": 14,
|
| 860 |
"contributors_2024": 2,
|
| 861 |
+
"contributors_2023": 0,
|
| 862 |
+
"contributors_2026_q1": 10
|
| 863 |
},
|
| 864 |
{
|
| 865 |
"repo_name": "Wan2.2",
|
|
|
|
| 871 |
"contributors_all": 16,
|
| 872 |
"contributors_2025": 14,
|
| 873 |
"contributors_2024": 0,
|
| 874 |
+
"contributors_2023": 0,
|
| 875 |
+
"contributors_2026_q1": 3
|
| 876 |
},
|
| 877 |
{
|
| 878 |
"repo_name": "Primus-Turbo",
|
|
|
|
| 882 |
"contributors_all": 14,
|
| 883 |
"contributors_2025": 12,
|
| 884 |
"contributors_2024": 0,
|
| 885 |
+
"contributors_2023": 0,
|
| 886 |
+
"contributors_2026_q1": 6
|
| 887 |
},
|
| 888 |
{
|
| 889 |
"repo_name": "flashinfer-bench",
|
|
|
|
| 892 |
"github_about_section": "Building the Virtuous Cycle for AI-driven LLM Systems",
|
| 893 |
"homepage_link": "https://bench.flashinfer.ai",
|
| 894 |
"github_topic_closest_fit": "benchmark",
|
| 895 |
+
"contributors_all": 18,
|
| 896 |
"contributors_2025": 11,
|
| 897 |
"contributors_2024": 0,
|
| 898 |
+
"contributors_2023": 0,
|
| 899 |
+
"contributors_2026_q1": 9
|
| 900 |
},
|
| 901 |
{
|
| 902 |
"repo_name": "FTorch",
|
|
|
|
| 908 |
"contributors_all": 22,
|
| 909 |
"contributors_2025": 12,
|
| 910 |
"contributors_2024": 8,
|
| 911 |
+
"contributors_2023": 9,
|
| 912 |
+
"contributors_2026_q1": 4
|
| 913 |
},
|
| 914 |
{
|
| 915 |
"repo_name": "TensorRT",
|
|
|
|
| 920 |
"contributors_all": 104,
|
| 921 |
"contributors_2025": 10,
|
| 922 |
"contributors_2024": 18,
|
| 923 |
+
"contributors_2023": 19,
|
| 924 |
+
"contributors_2026_q1": 4
|
| 925 |
},
|
| 926 |
{
|
| 927 |
"repo_name": "TileIR",
|
|
|
|
| 932 |
"contributors_all": 10,
|
| 933 |
"contributors_2025": 10,
|
| 934 |
"contributors_2024": 1,
|
| 935 |
+
"contributors_2023": 0,
|
| 936 |
+
"contributors_2026_q1": 0
|
| 937 |
},
|
| 938 |
{
|
| 939 |
"repo_name": "kernels-community",
|
|
|
|
| 941 |
"category": "gpu kernels",
|
| 942 |
"homepage_link": "https://huggingface.co/kernels-community",
|
| 943 |
"github_about_section": "Kernel sources for https://huggingface.co/kernels-community",
|
| 944 |
+
"contributors_all": 15,
|
| 945 |
"contributors_2025": 9,
|
| 946 |
"contributors_2024": 0,
|
| 947 |
+
"contributors_2023": 0,
|
| 948 |
+
"contributors_2026_q1": 11
|
| 949 |
},
|
| 950 |
{
|
| 951 |
"repo_name": "GEAK-agent",
|
|
|
|
| 953 |
"category": "agent",
|
| 954 |
"github_about_section": "It is an LLM-based AI agent, which can write correct and efficient gpu kernels automatically.",
|
| 955 |
"github_topic_closest_fit": "ai-agents",
|
| 956 |
+
"contributors_all": 20,
|
| 957 |
"contributors_2025": 9,
|
| 958 |
"contributors_2024": 0,
|
| 959 |
+
"contributors_2023": 0,
|
| 960 |
+
"contributors_2026_q1": 12
|
| 961 |
},
|
| 962 |
{
|
| 963 |
"repo_name": "intelliperf",
|
|
|
|
| 969 |
"contributors_all": 7,
|
| 970 |
"contributors_2025": 7,
|
| 971 |
"contributors_2024": 0,
|
| 972 |
+
"contributors_2023": 0,
|
| 973 |
+
"contributors_2026_q1": 2
|
| 974 |
},
|
| 975 |
{
|
| 976 |
"repo_name": "cudnn-frontend",
|
|
|
|
| 982 |
"contributors_all": 14,
|
| 983 |
"contributors_2025": 6,
|
| 984 |
"contributors_2024": 5,
|
| 985 |
+
"contributors_2023": 1,
|
| 986 |
+
"contributors_2026_q1": 3
|
| 987 |
},
|
| 988 |
{
|
| 989 |
"repo_name": "BitBLAS",
|
|
|
|
| 994 |
"contributors_all": 17,
|
| 995 |
"contributors_2025": 5,
|
| 996 |
"contributors_2024": 14,
|
| 997 |
+
"contributors_2023": 0,
|
| 998 |
+
"contributors_2026_q1": 0
|
| 999 |
},
|
| 1000 |
{
|
| 1001 |
"repo_name": "Self-Forcing",
|
|
|
|
| 1007 |
"contributors_all": 4,
|
| 1008 |
"contributors_2025": 4,
|
| 1009 |
"contributors_2024": 0,
|
| 1010 |
+
"contributors_2023": 0,
|
| 1011 |
+
"contributors_2026_q1": 0
|
| 1012 |
},
|
| 1013 |
{
|
| 1014 |
"repo_name": "TritonBench",
|
|
|
|
| 1020 |
"contributors_all": 3,
|
| 1021 |
"contributors_2025": 3,
|
| 1022 |
"contributors_2024": 0,
|
| 1023 |
+
"contributors_2023": 0,
|
| 1024 |
+
"contributors_2026_q1": 0
|
| 1025 |
},
|
| 1026 |
{
|
| 1027 |
"repo_name": "hatchet",
|
|
|
|
| 1033 |
"contributors_all": 25,
|
| 1034 |
"contributors_2025": 3,
|
| 1035 |
"contributors_2024": 6,
|
| 1036 |
+
"contributors_2023": 8,
|
| 1037 |
+
"contributors_2026_q1": 1
|
| 1038 |
},
|
| 1039 |
{
|
| 1040 |
"repo_name": "streamv2v",
|
|
|
|
| 1046 |
"contributors_all": 7,
|
| 1047 |
"contributors_2025": 3,
|
| 1048 |
"contributors_2024": 6,
|
| 1049 |
+
"contributors_2023": 0,
|
| 1050 |
+
"contributors_2026_q1": 0
|
| 1051 |
},
|
| 1052 |
{
|
| 1053 |
"repo_name": "mistral-inference",
|
|
|
|
| 1059 |
"contributors_all": 30,
|
| 1060 |
"contributors_2025": 2,
|
| 1061 |
"contributors_2024": 17,
|
| 1062 |
+
"contributors_2023": 14,
|
| 1063 |
+
"contributors_2026_q1": 1
|
| 1064 |
},
|
| 1065 |
{
|
| 1066 |
"repo_name": "omnitrace",
|
|
|
|
| 1072 |
"contributors_all": 16,
|
| 1073 |
"contributors_2025": 2,
|
| 1074 |
"contributors_2024": 12,
|
| 1075 |
+
"contributors_2023": 2,
|
| 1076 |
+
"contributors_2026_q1": 0
|
| 1077 |
},
|
| 1078 |
{
|
| 1079 |
"repo_name": "IMO2025",
|
|
|
|
| 1085 |
"contributors_all": 2,
|
| 1086 |
"contributors_2025": 2,
|
| 1087 |
"contributors_2024": 0,
|
| 1088 |
+
"contributors_2023": 0,
|
| 1089 |
+
"contributors_2026_q1": 0
|
| 1090 |
},
|
| 1091 |
{
|
| 1092 |
"repo_name": "RaBitQ",
|
|
|
|
| 1098 |
"contributors_all": 2,
|
| 1099 |
"contributors_2025": 2,
|
| 1100 |
"contributors_2024": 1,
|
| 1101 |
+
"contributors_2023": 0,
|
| 1102 |
+
"contributors_2026_q1": 1
|
| 1103 |
},
|
| 1104 |
{
|
| 1105 |
"repo_name": "torchdendrite",
|
|
|
|
| 1109 |
"contributors_all": 2,
|
| 1110 |
"contributors_2025": 1,
|
| 1111 |
"contributors_2024": 1,
|
| 1112 |
+
"contributors_2023": 0,
|
| 1113 |
+
"contributors_2026_q1": 0
|
| 1114 |
},
|
| 1115 |
{
|
| 1116 |
"repo_name": "triton-runner",
|
|
|
|
| 1121 |
"contributors_all": 2,
|
| 1122 |
"contributors_2025": 1,
|
| 1123 |
"contributors_2024": 0,
|
| 1124 |
+
"contributors_2023": 0,
|
| 1125 |
+
"contributors_2026_q1": 2
|
| 1126 |
},
|
| 1127 |
{
|
| 1128 |
"repo_name": "triSYCL",
|
|
|
|
| 1134 |
"contributors_all": 31,
|
| 1135 |
"contributors_2025": 0,
|
| 1136 |
"contributors_2024": 1,
|
| 1137 |
+
"contributors_2023": 3,
|
| 1138 |
+
"contributors_2026_q1": 0
|
| 1139 |
},
|
| 1140 |
{
|
| 1141 |
"repo_name": "StreamDiffusion",
|
|
|
|
| 1147 |
"contributors_all": 29,
|
| 1148 |
"contributors_2025": 0,
|
| 1149 |
"contributors_2024": 9,
|
| 1150 |
+
"contributors_2023": 25,
|
| 1151 |
+
"contributors_2026_q1": 0
|
| 1152 |
},
|
| 1153 |
{
|
| 1154 |
"repo_name": "wandb",
|
|
|
|
| 1156 |
"category": "ml visualization",
|
| 1157 |
"github_about_section": "The AI developer platform. Use Weights & Biases to train and fine-tune models, and manage models from experimentation to production.",
|
| 1158 |
"homepage_link": "https://wandb.ai",
|
| 1159 |
+
"contributors_all": 238,
|
| 1160 |
"contributors_2025": 46,
|
| 1161 |
"contributors_2024": 67,
|
| 1162 |
+
"contributors_2023": 62,
|
| 1163 |
+
"contributors_2026_q1": 24
|
| 1164 |
},
|
| 1165 |
{
|
| 1166 |
"repo_name": "aws-neuron-sdk",
|
|
|
|
| 1168 |
"category": "sdk",
|
| 1169 |
"github_about_section": "Powering AWS purpose-built machine learning chips. Blazing fast and cost effective, natively integrated into PyTorch and TensorFlow and integrated with your favorite AWS services",
|
| 1170 |
"homepage_link": "https://aws.amazon.com/ai/machine-learning/neuron",
|
| 1171 |
+
"contributors_all": 145,
|
| 1172 |
"contributors_2025": 33,
|
| 1173 |
"contributors_2024": 37,
|
| 1174 |
+
"contributors_2023": 32,
|
| 1175 |
+
"contributors_2026_q1": 10
|
| 1176 |
},
|
| 1177 |
{
|
| 1178 |
"repo_name": "onnxruntime",
|
|
|
|
| 1180 |
"category": "machine learning interoperability",
|
| 1181 |
"github_about_section": "ONNX Runtime: cross-platform, high performance ML inferencing and training accelerator",
|
| 1182 |
"homepage_link": "https://onnxruntime.ai",
|
| 1183 |
+
"contributors_all": 877,
|
| 1184 |
"contributors_2025": 237,
|
| 1185 |
"contributors_2024": 213,
|
| 1186 |
+
"contributors_2023": 213,
|
| 1187 |
+
"contributors_2026_q1": 107
|
| 1188 |
},
|
| 1189 |
{
|
| 1190 |
"repo_name": "ort",
|
|
|
|
| 1195 |
"contributors_all": 70,
|
| 1196 |
"contributors_2025": 25,
|
| 1197 |
"contributors_2024": 20,
|
| 1198 |
+
"contributors_2023": 21,
|
| 1199 |
+
"contributors_2026_q1": 11
|
| 1200 |
},
|
| 1201 |
{
|
| 1202 |
"repo_name": "Triton-distributed",
|
|
|
|
| 1204 |
"category": "distributed computing",
|
| 1205 |
"github_about_section": "Distributed Compiler based on Triton for Parallel Systems",
|
| 1206 |
"homepage_link": "https://triton-distributed.readthedocs.io",
|
| 1207 |
+
"contributors_all": 37,
|
| 1208 |
"contributors_2025": 30,
|
| 1209 |
"contributors_2024": 0,
|
| 1210 |
+
"contributors_2023": 0,
|
| 1211 |
+
"contributors_2026_q1": 11
|
| 1212 |
},
|
| 1213 |
{
|
| 1214 |
"repo_name": "gemlite",
|
|
|
|
| 1218 |
"contributors_all": 5,
|
| 1219 |
"contributors_2025": 1,
|
| 1220 |
"contributors_2024": 5,
|
| 1221 |
+
"contributors_2023": 0,
|
| 1222 |
+
"contributors_2026_q1": 1
|
| 1223 |
},
|
| 1224 |
{
|
| 1225 |
"repo_name": "cutile-python",
|
|
|
|
| 1227 |
"category": "parallel computing",
|
| 1228 |
"github_about_section": "cuTile is a programming model for writing parallel kernels for NVIDIA GPUs",
|
| 1229 |
"homepage_link": "https://docs.nvidia.com/cuda/cutile-python",
|
| 1230 |
+
"contributors_all": 20,
|
| 1231 |
"contributors_2025": 10,
|
| 1232 |
"contributors_2024": 0,
|
| 1233 |
+
"contributors_2023": 0,
|
| 1234 |
+
"contributors_2026_q1": 14
|
| 1235 |
},
|
| 1236 |
{
|
| 1237 |
"repo_name": "tilus",
|
|
|
|
| 1239 |
"category": "parallel computing",
|
| 1240 |
"github_about_section": "Tilus is a tile-level kernel programming language with explicit control over shared memory and registers.",
|
| 1241 |
"homepage_link": "https://nvidia.github.io/tilus",
|
| 1242 |
+
"contributors_all": 7,
|
| 1243 |
"contributors_2025": 4,
|
| 1244 |
"contributors_2024": 0,
|
| 1245 |
+
"contributors_2023": 0,
|
| 1246 |
+
"contributors_2026_q1": 3
|
| 1247 |
},
|
| 1248 |
{
|
| 1249 |
"repo_name": "triton-windows",
|
|
|
|
| 1253 |
"contributors_all": 537,
|
| 1254 |
"contributors_2025": 233,
|
| 1255 |
"contributors_2024": 207,
|
| 1256 |
+
"contributors_2023": 159,
|
| 1257 |
+
"contributors_2026_q1": 67
|
| 1258 |
},
|
| 1259 |
{
|
| 1260 |
"repo_name": "flash-linear-attention",
|
| 1261 |
"repo_link": "https://github.com/fla-org/flash-linear-attention",
|
| 1262 |
"category": "gpu kernels",
|
| 1263 |
"github_about_section": "Efficient implementations of state-of-the-art linear attention models",
|
| 1264 |
+
"contributors_all": 93,
|
| 1265 |
"contributors_2025": 64,
|
| 1266 |
"contributors_2024": 22,
|
| 1267 |
+
"contributors_2023": 3,
|
| 1268 |
+
"contributors_2026_q1": 18
|
| 1269 |
},
|
| 1270 |
{
|
| 1271 |
"repo_name": "nccl",
|
| 1272 |
"repo_link": "https://github.com/NVIDIA/nccl",
|
| 1273 |
"category": "distributed computing",
|
| 1274 |
"github_about_section": "Optimized primitives for collective multi-GPU communication",
|
| 1275 |
+
"homepage_link": "https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html",
|
| 1276 |
+
"contributors_all": 84,
|
| 1277 |
+
"contributors_2026_q1": 26,
|
| 1278 |
+
"contributors_2025": 10,
|
| 1279 |
+
"contributors_2024": 5,
|
| 1280 |
+
"contributors_2023": 6
|
| 1281 |
},
|
| 1282 |
{
|
| 1283 |
"repo_name": "kraken",
|
| 1284 |
"repo_link": "https://github.com/meta-pytorch/kraken",
|
| 1285 |
+
"github_about_section": "Triton-based Symmetric Memory operators and examples",
|
| 1286 |
+
"contributors_all": 11,
|
| 1287 |
+
"contributors_2026_q1": 1,
|
| 1288 |
+
"contributors_2025": 11,
|
| 1289 |
+
"contributors_2024": 0,
|
| 1290 |
+
"contributors_2023": 0
|
| 1291 |
},
|
| 1292 |
{
|
| 1293 |
"repo_name": "nvshmem",
|
| 1294 |
"repo_link": "https://github.com/NVIDIA/nvshmem",
|
| 1295 |
"github_about_section": "NVIDIA NVSHMEM is a parallel programming interface for NVIDIA GPUs based on OpenSHMEM. NVSHMEM can significantly reduce multi-process communication and coordination overheads by allowing programmers to perform one-sided communication from within CUDA kernels and on CUDA streams.",
|
| 1296 |
+
"homepage_link": "https://docs.nvidia.com/nvshmem/api/index.html",
|
| 1297 |
+
"contributors_all": 20,
|
| 1298 |
+
"contributors_2026_q1": 10,
|
| 1299 |
+
"contributors_2025": 19,
|
| 1300 |
+
"contributors_2024": 0,
|
| 1301 |
+
"contributors_2023": 0
|
| 1302 |
},
|
| 1303 |
{
|
| 1304 |
"repo_name": "OLMo",
|
| 1305 |
"repo_link": "https://github.com/allenai/OLMo",
|
| 1306 |
"github_about_section": "Modeling, training, eval, and inference code for OLMo",
|
| 1307 |
+
"homepage_link": "https://allenai.org/olmo",
|
| 1308 |
+
"contributors_all": 69,
|
| 1309 |
+
"contributors_2026_q1": 0,
|
| 1310 |
+
"contributors_2025": 16,
|
| 1311 |
+
"contributors_2024": 45,
|
| 1312 |
+
"contributors_2023": 28
|
| 1313 |
},
|
| 1314 |
{
|
| 1315 |
"repo_name": "kernelbot",
|
| 1316 |
"repo_link": "https://github.com/gpu-mode/kernelbot",
|
| 1317 |
"github_about_section": "Write a fast kernel and see how you compare against the best humans and AI on gpumode.com",
|
| 1318 |
+
"homepage_link": "https://www.gpumode.com",
|
| 1319 |
+
"contributors_all": 25,
|
| 1320 |
+
"contributors_2026_q1": 9,
|
| 1321 |
+
"contributors_2025": 19,
|
| 1322 |
+
"contributors_2024": 8,
|
| 1323 |
+
"contributors_2023": 0
|
| 1324 |
},
|
| 1325 |
{
|
| 1326 |
"repo_name": "openzl",
|
| 1327 |
"repo_link": "https://github.com/facebook/openzl",
|
| 1328 |
"github_about_section": "A novel data compression framework",
|
| 1329 |
+
"homepage_link": "https://openzl.org",
|
| 1330 |
+
"contributors_all": 39,
|
| 1331 |
+
"contributors_2026_q1": 24,
|
| 1332 |
+
"contributors_2025": 22,
|
| 1333 |
+
"contributors_2024": 0,
|
| 1334 |
+
"contributors_2023": 0
|
| 1335 |
},
|
| 1336 |
{
|
| 1337 |
"repo_name": "torchforge",
|
| 1338 |
"repo_link": "https://github.com/meta-pytorch/torchforge",
|
| 1339 |
"github_about_section": "PyTorch-native post-training at scale",
|
| 1340 |
+
"homepage_link": "https://meta-pytorch.org/torchforge",
|
| 1341 |
+
"contributors_all": 43,
|
| 1342 |
+
"contributors_2026_q1": 12,
|
| 1343 |
+
"contributors_2025": 36,
|
| 1344 |
+
"contributors_2024": 0,
|
| 1345 |
+
"contributors_2023": 0
|
| 1346 |
},
|
| 1347 |
{
|
| 1348 |
"repo_name": "open-instruct",
|
| 1349 |
"repo_link": "https://github.com/allenai/open-instruct",
|
| 1350 |
"github_about_section": "AllenAI's post-training codebase",
|
| 1351 |
+
"homepage_link": "https://allenai.github.io/open-instruct/",
|
| 1352 |
+
"contributors_all": 57,
|
| 1353 |
+
"contributors_2026_q1": 12,
|
| 1354 |
+
"contributors_2025": 28,
|
| 1355 |
+
"contributors_2024": 24,
|
| 1356 |
+
"contributors_2023": 8
|
| 1357 |
},
|
| 1358 |
{
|
| 1359 |
"repo_name": "prime-rl",
|
| 1360 |
"repo_link": "https://github.com/PrimeIntellect-ai/prime-rl",
|
| 1361 |
+
"github_about_section": "Agentic RL Training at Scale",
|
| 1362 |
+
"contributors_all": 58,
|
| 1363 |
+
"contributors_2026_q1": 29,
|
| 1364 |
+
"contributors_2025": 40,
|
| 1365 |
+
"contributors_2024": 8,
|
| 1366 |
+
"contributors_2023": 0
|
| 1367 |
},
|
| 1368 |
{
|
| 1369 |
"repo_name": "SkyRL",
|
| 1370 |
"repo_link": "https://github.com/NovaSky-AI/SkyRL",
|
| 1371 |
"github_about_section": "SkyRL: A Modular Full-stack RL Library for LLMs",
|
| 1372 |
+
"homepage_link": "https://docs.skyrl.ai/docs",
|
| 1373 |
+
"contributors_all": 77,
|
| 1374 |
+
"contributors_2026_q1": 29,
|
| 1375 |
+
"contributors_2025": 57,
|
| 1376 |
+
"contributors_2024": 0,
|
| 1377 |
+
"contributors_2023": 0
|
| 1378 |
},
|
| 1379 |
{
|
| 1380 |
"repo_name": "OpenRLHF",
|
| 1381 |
"repo_link": "https://github.com/OpenRLHF/OpenRLHF",
|
| 1382 |
"github_about_section": "An Easy-to-use, Scalable and High-performance Agentic RL Framework based on Ray (PPO & DAPO & REINFORCE++ & VLM & TIS & vLLM & Ray & Async RL)",
|
| 1383 |
+
"homepage_link": "https://openrlhf.readthedocs.io",
|
| 1384 |
+
"contributors_all": 93,
|
| 1385 |
+
"contributors_2026_q1": 9,
|
| 1386 |
+
"contributors_2025": 42,
|
| 1387 |
+
"contributors_2024": 45,
|
| 1388 |
+
"contributors_2023": 19
|
| 1389 |
},
|
| 1390 |
{
|
| 1391 |
"repo_name": "PipelineRL",
|
| 1392 |
"repo_link": "https://github.com/ServiceNow/PipelineRL",
|
| 1393 |
"github_about_section": "A scalable asynchronous reinforcement learning implementation with in-flight weight updates.",
|
| 1394 |
+
"homepage_link": "https://arxiv.org/abs/2509.19128",
|
| 1395 |
+
"contributors_all": 14,
|
| 1396 |
+
"contributors_2026_q1": 4,
|
| 1397 |
+
"contributors_2025": 13,
|
| 1398 |
+
"contributors_2024": 0,
|
| 1399 |
+
"contributors_2023": 0
|
| 1400 |
},
|
| 1401 |
{
|
| 1402 |
"repo_name": "cosmos-predict2.5",
|
| 1403 |
"repo_link": "https://github.com/nvidia-cosmos/cosmos-predict2.5",
|
| 1404 |
"github_about_section": "Cosmos-Predict2.5, the latest version of the Cosmos World Foundation Models (WFMs) family, specialized for simulating and predicting the future state of the world in the form of video.",
|
| 1405 |
+
"homepage_link": "https://research.nvidia.com/labs/cosmos-lab/cosmos-predict2.5",
|
| 1406 |
+
"contributors_all": 13,
|
| 1407 |
+
"contributors_2026_q1": 7,
|
| 1408 |
+
"contributors_2025": 9,
|
| 1409 |
+
"contributors_2024": 0,
|
| 1410 |
+
"contributors_2023": 0
|
| 1411 |
},
|
| 1412 |
{
|
| 1413 |
"repo_name": "AReal",
|
| 1414 |
"repo_link": "https://github.com/inclusionAI/AReaL",
|
| 1415 |
"github_about_section": "The RL Bridge for LLM-based Agent Applications. Made Simple & Flexible.",
|
| 1416 |
+
"homepage_link": "https://www.inclusion-ai.org/AReaL",
|
| 1417 |
+
"contributors_all": 89,
|
| 1418 |
+
"contributors_2026_q1": 37,
|
| 1419 |
+
"contributors_2025": 63,
|
| 1420 |
+
"contributors_2024": 0,
|
| 1421 |
+
"contributors_2023": 0
|
| 1422 |
},
|
| 1423 |
{
|
| 1424 |
"repo_name": "RLinf",
|
| 1425 |
"repo_link": "https://github.com/RLinf/RLinf",
|
| 1426 |
"github_about_section": "RLinf: Reinforcement Learning Infrastructure for Embodied and Agentic AI",
|
| 1427 |
+
"homepage_link": "https://rlinf.readthedocs.io",
|
| 1428 |
+
"contributors_all": 76,
|
| 1429 |
+
"contributors_2026_q1": 52,
|
| 1430 |
+
"contributors_2025": 35,
|
| 1431 |
+
"contributors_2024": 0,
|
| 1432 |
+
"contributors_2023": 0
|
| 1433 |
},
|
| 1434 |
{
|
| 1435 |
"repo_name": "ROLL",
|
| 1436 |
"repo_link": "https://github.com/alibaba/ROLL",
|
| 1437 |
"github_about_section": "An Efficient and User-Friendly Scaling Library for Reinforcement Learning with Large Language Models",
|
| 1438 |
+
"homepage_link": "https://alibaba.github.io/ROLL/",
|
| 1439 |
+
"contributors_all": 78,
|
| 1440 |
+
"contributors_2026_q1": 30,
|
| 1441 |
+
"contributors_2025": 60,
|
| 1442 |
+
"contributors_2024": 0,
|
| 1443 |
+
"contributors_2023": 0
|
| 1444 |
}
|
| 1445 |
]
|
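
For quick inspection of the updated dataset, here is a minimal sketch (not part of the commit) that loads the JSON and ranks entries by the "contributors_2026_q1" field this commit adds. The local filename and the top-10 cutoff are assumptions for illustration, not anything the commit specifies.

import json

# Assumed local path: the dataset file as it exists after this commit.
with open("PyTorchConference2025_GithubRepos.json") as f:
    repos = json.load(f)

# Rank by the newly added Q1-2026 contributor count; .get() guards
# against any entry that still lacks the field.
ranked = sorted(repos, key=lambda r: r.get("contributors_2026_q1", 0), reverse=True)

for entry in ranked[:10]:
    print(f'{entry["repo_name"]:<30}{entry.get("contributors_2026_q1", 0):>5}')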