# Overview

The `bitsandbytes.functional` API provides the low-level building blocks for the library's features.

## When to Use bitsandbytes.functional

* When you need direct control over quantized operations and their parameters.
* To build custom layers or operations leveraging low-bit arithmetic.
* To integrate with other ecosystem tooling.
* For experimental or research purposes requiring non-standard quantization or performance optimizations.

## LLM.int8()

### bitsandbytes.functional.int8_double_quant

```python
int8_double_quant(A: torch.Tensor, col_stats=None, row_stats=None, out_col=None, out_row=None, threshold=0.0)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L2155)

Determines the quantization statistics for input matrix `A` in accordance with the `LLM.int8()` algorithm. The statistics are determined both row-wise and column-wise (transposed).

For more information, see the [LLM.int8() paper](https://arxiv.org/abs/2208.07339).

<Tip>

This function is useful for training, but for inference it is advised to use `int8_vectorwise_quant` instead. This implementation performs additional column-wise transposed calculations which are not optimized.

</Tip>

**Parameters:**

* **A** (`torch.Tensor` with dtype `torch.float16`) — The input matrix.
* **col_stats** (`torch.Tensor`, *optional*) — A pre-allocated tensor to hold the column-wise quantization scales.
* **row_stats** (`torch.Tensor`, *optional*) — A pre-allocated tensor to hold the row-wise quantization scales.
* **out_col** (`torch.Tensor`, *optional*) — A pre-allocated tensor to hold the column-wise quantized data.
* **out_row** (`torch.Tensor`, *optional*) — A pre-allocated tensor to hold the row-wise quantized data.
* **threshold** (`float`, *optional*) — An optional threshold for sparse decomposition of outlier features. No outliers are held back when 0.0. Defaults to 0.0.

**Returns:** `Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Optional[torch.Tensor]]` — A tuple containing the quantized tensor and relevant statistics:

* `torch.Tensor` with dtype `torch.int8`: The row-wise quantized data.
* `torch.Tensor` with dtype `torch.int8`: The column-wise quantized data.
* `torch.Tensor` with dtype `torch.float32`: The row-wise quantization scales.
* `torch.Tensor` with dtype `torch.float32`: The column-wise quantization scales.
* `torch.Tensor` with dtype `torch.int32`, *optional*: A list of column indices which contain outlier features.
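A minimal usage sketch (the input shape, fp16 dtype, and CUDA placement are assumptions; `threshold=6.0` is only an illustrative choice for outlier decomposition):

```python
import torch
import bitsandbytes.functional as F

# fp16 input on a CUDA device (assumed; the int8 kernels target GPU tensors)
A = torch.randn(128, 256, dtype=torch.float16, device="cuda")

# Unpack in the documented return order: row-wise data, column-wise data,
# row-wise scales, column-wise scales, optional outlier column indices.
out_row, out_col, row_stats, col_stats, outlier_cols = F.int8_double_quant(A, threshold=6.0)

print(out_row.dtype, row_stats.dtype)  # torch.int8 torch.float32
```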
### bitsandbytes.functional.int8_linear_matmul

```python
int8_linear_matmul(A: torch.Tensor, B: torch.Tensor, out=None, dtype=torch.int32)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L1843)

Performs an 8-bit integer matrix multiplication.

A linear transformation is applied such that `out = A @ B.T`. When possible, integer tensor core hardware is utilized to accelerate the operation.

**Parameters:**

* **A** (`torch.Tensor`) — The first matrix operand with the data type `torch.int8`.
* **B** (`torch.Tensor`) — The second matrix operand with the data type `torch.int8`.
* **out** (`torch.Tensor`, *optional*) — A pre-allocated tensor used to store the result.
* **dtype** (`torch.dtype`, *optional*) — The expected data type of the output. Defaults to `torch.int32`.

**Returns:** `torch.Tensor` — The result of the operation.

**Raises:**

* `NotImplementedError` — The operation is not supported in the current environment.
* `RuntimeError` — Raised when the operation cannot be completed for any other reason.
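A short sketch with synthetic int8 operands (shapes and device are illustrative; note that `B` is laid out as `(N, K)` because the kernel computes `A @ B.T`):

```python
import torch
import bitsandbytes.functional as F

A = torch.randint(-128, 127, (64, 256), dtype=torch.int8, device="cuda")  # (M, K)
B = torch.randint(-128, 127, (32, 256), dtype=torch.int8, device="cuda")  # (N, K)

C = F.int8_linear_matmul(A, B)  # int32 accumulator, shape (M, N)
print(C.shape, C.dtype)         # torch.Size([64, 32]) torch.int32
```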
### bitsandbytes.functional.int8_mm_dequant

```python
int8_mm_dequant(A: torch.Tensor, row_stats: torch.Tensor, col_stats: torch.Tensor, out=None, bias=None)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L1868)

Performs dequantization on the result of a quantized int8 matrix multiplication.

**Parameters:**

* **A** (`torch.Tensor` with dtype `torch.int32`) — The result of a quantized int8 matrix multiplication.
* **row_stats** (`torch.Tensor`) — The row-wise quantization statistics for the lhs operand of the matrix multiplication.
* **col_stats** (`torch.Tensor`) — The column-wise quantization statistics for the rhs operand of the matrix multiplication.
* **out** (`torch.Tensor`, *optional*) — A pre-allocated tensor to store the output of the operation.
* **bias** (`torch.Tensor`, *optional*) — An optional bias vector to add to the result.

**Returns:** `torch.Tensor` — The dequantized result with an optional bias, with dtype `torch.float16`.
### bitsandbytes.functional.int8_vectorwise_dequant

```python
int8_vectorwise_dequant(A: torch.Tensor, stats: torch.Tensor)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L2199)

Dequantizes a tensor with dtype `torch.int8` to `torch.float32`.

**Parameters:**

* **A** (`torch.Tensor` with dtype `torch.int8`) — The quantized int8 tensor.
* **stats** (`torch.Tensor` with dtype `torch.float32`) — The row-wise quantization statistics.

**Returns:** `torch.Tensor` with dtype `torch.float32` — The dequantized tensor.
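A brief round-trip sketch (the inputs are illustrative and produced with `int8_vectorwise_quant`, documented below):

```python
import torch
import bitsandbytes.functional as F

A = torch.randn(16, 64, dtype=torch.float16, device="cuda")
A_i8, stats, _ = F.int8_vectorwise_quant(A)      # row-wise int8 quantization

A_fp32 = F.int8_vectorwise_dequant(A_i8, stats)  # float32 approximation of A
print((A.float() - A_fp32).abs().max())          # small quantization error
```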
### bitsandbytes.functional.int8_vectorwise_quant

```python
int8_vectorwise_quant(A: torch.Tensor, threshold=0.0)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L2213)

Quantizes a tensor with dtype `torch.float16` to `torch.int8` in accordance with the `LLM.int8()` algorithm.

For more information, see the [LLM.int8() paper](https://arxiv.org/abs/2208.07339).

**Parameters:**

* **A** (`torch.Tensor` with dtype `torch.float16`) — The input tensor.
* **threshold** (`float`, *optional*) — An optional threshold for sparse decomposition of outlier features. No outliers are held back when 0.0. Defaults to 0.0.

**Returns:** `Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]` — A tuple containing the quantized tensor and relevant statistics:

* `torch.Tensor` with dtype `torch.int8`: The quantized data.
* `torch.Tensor` with dtype `torch.float32`: The quantization scales.
* `torch.Tensor` with dtype `torch.int32`, *optional*: A list of column indices which contain outlier features.
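Putting the LLM.int8() primitives together: a hedged end-to-end sketch of an int8 linear forward pass. The shapes, device, and the omission of outlier handling (`threshold=0.0`) are simplifying assumptions:

```python
import torch
import bitsandbytes.functional as F

x = torch.randn(8, 512, dtype=torch.float16, device="cuda")    # activations (M, K)
W = torch.randn(256, 512, dtype=torch.float16, device="cuda")  # weights (N, K)

# Row-wise int8 quantization of both operands
x_i8, x_stats, _ = F.int8_vectorwise_quant(x)
w_i8, w_stats, _ = F.int8_vectorwise_quant(W)

# int8 matmul with int32 accumulation, then dequantize back to fp16:
# row stats come from the lhs operand, column stats from the rhs operand.
acc = F.int8_linear_matmul(x_i8, w_i8)        # (8, 256), torch.int32
y = F.int8_mm_dequant(acc, x_stats, w_stats)  # (8, 256), torch.float16

print(y.dtype, y.shape)
```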
## 4-bit

### bitsandbytes.functional.dequantize_4bit

```python
dequantize_4bit(A: torch.Tensor, quant_state=None, absmax=None, out=None, blocksize: int = 64, quant_type='fp4')
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L1085)

Dequantizes a packed 4-bit quantized tensor.

The input tensor is dequantized by dividing it into blocks of `blocksize` values. The absolute maximum value within these blocks is used for scaling the non-linear dequantization.

**Parameters:**

* **A** (`torch.Tensor`) — The quantized input tensor.
* **quant_state** (`QuantState`, *optional*) — The quantization state as returned by `quantize_4bit`. Required if `absmax` is not provided.
* **absmax** (`torch.Tensor`, *optional*) — A tensor containing the scaling values. Required if `quant_state` is not provided and ignored otherwise.
* **out** (`torch.Tensor`, *optional*) — A tensor to use to store the result.
* **blocksize** (`int`, *optional*) — The size of the blocks. Defaults to 64. Valid values are 64, 128, 256, 512, 1024, 2048, and 4096.
* **quant_type** (`str`, *optional*) — The data type to use: `nf4` or `fp4`. Defaults to `fp4`.

**Returns:** `torch.Tensor` — The dequantized tensor.

**Raises:**

* `ValueError` — Raised when the input data type or blocksize is not supported.
### bitsandbytes.functional.dequantize_fp4

```python
dequantize_fp4(A: torch.Tensor, quant_state=None, absmax=None, out=None, blocksize: int = 64)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L1065)

### bitsandbytes.functional.dequantize_nf4

```python
dequantize_nf4(A: torch.Tensor, quant_state=None, absmax=None, out=None, blocksize: int = 64)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L1075)

### bitsandbytes.functional.gemv_4bit

```python
gemv_4bit(A: torch.Tensor, B: torch.Tensor, out=None, transposed_A=False, transposed_B=False, state=None)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L1606)

### bitsandbytes.functional.quantize_4bit

```python
quantize_4bit(A: torch.Tensor, absmax=None, out=None, blocksize=None, compress_statistics=False, quant_type='fp4', quant_storage=torch.uint8)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L1021)

Quantize tensor `A` in blocks of 4-bit values.

Quantizes tensor `A` by dividing it into blocks which are independently quantized.

**Parameters:**

* **A** (`torch.Tensor`) — The input tensor. Supports `float16`, `bfloat16`, or `float32` datatypes.
* **absmax** (`torch.Tensor`, *optional*) — A tensor to use to store the absmax values.
* **out** (`torch.Tensor`, *optional*) — A tensor to use to store the result.
* **blocksize** (`int`, *optional*) — The size of the blocks. Defaults to 64. Valid values are 64, 128, 256, 512, 1024, 2048, and 4096.
* **compress_statistics** (`bool`, *optional*) — Whether to additionally quantize the absmax values. Defaults to False.
* **quant_type** (`str`, *optional*) — The data type to use: `nf4` or `fp4`. Defaults to `fp4`.
* **quant_storage** (`torch.dtype`, *optional*) — The dtype of the tensor used to store the result. Defaults to `torch.uint8`.

**Returns:** `Tuple[torch.Tensor, QuantState]` — A tuple containing the quantization results:

* `torch.Tensor`: The quantized tensor with packed 4-bit values.
* `QuantState`: The state object used to undo the quantization.

**Raises:**

* `ValueError` — Raised when the input data type is not supported.
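A hedged round-trip sketch for NF4 quantization (the weight shape and CUDA placement are assumptions):

```python
import torch
import bitsandbytes.functional as F

W = torch.randn(1024, 1024, dtype=torch.float16, device="cuda")

# Quantize to packed 4-bit NF4; `state` carries absmax, blocksize, quant_type, etc.
W4, state = F.quantize_4bit(W, blocksize=64, quant_type="nf4")
print(W4.dtype, W4.numel())  # torch.uint8, two 4-bit values packed per byte

# Reconstruct an fp16 approximation; the state supplies the dequantization parameters
W_hat = F.dequantize_4bit(W4, quant_state=state)
print((W - W_hat).abs().mean())  # mean absolute quantization error
```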
### bitsandbytes.functional.quantize_fp4

```python
quantize_fp4(A: torch.Tensor, absmax=None, out=None, blocksize=None, compress_statistics=False, quant_storage=torch.uint8)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L991)

### bitsandbytes.functional.quantize_nf4

```python
quantize_nf4(A: torch.Tensor, absmax=None, out=None, blocksize=None, compress_statistics=False, quant_storage=torch.uint8)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L1006)

### class bitsandbytes.utils.QuantState

```python
class QuantState(absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/utils.py#L259)

Container for quantization state components to work with `Params4bit` and similar classes.

#### as_dict

```python
as_dict(packed=False)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/utils.py#L371)

Returns a dict of tensors and strings to use in serialization via `_save_to_state_dict()`. When `packed` is set, it returns a `dict[str, torch.Tensor]` state_dict fit for safetensors saving.

#### from_dict

```python
from_dict(qs_dict: Dict[str, Any], device: torch.device)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/utils.py#L320)

Unpacks the components of a state_dict into a `QuantState`, converting them into strings, `torch.dtype`, ints, etc. where necessary.

`qs_dict` is based on the state_dict, with only the relevant keys, stripped of prefixes. An item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.
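A hedged serialization sketch, assuming `state` is the `QuantState` returned by `quantize_4bit` above and that the packed dict produced by `as_dict` round-trips directly through `from_dict`:

```python
import torch
from bitsandbytes.utils import QuantState

# Pack the quant state into a safetensors-friendly dict[str, torch.Tensor]
qs_dict = state.as_dict(packed=True)

# Rebuild the state on a target device from the (prefix-stripped) dict
state2 = QuantState.from_dict(qs_dict, device=torch.device("cuda"))
```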
## Dynamic 8-bit Quantization

Primitives used in the 8-bit optimizer quantization.

For more details see [8-Bit Approximations for Parallelism in Deep Learning](https://arxiv.org/abs/1511.04561).

### bitsandbytes.functional.dequantize_blockwise

```python
dequantize_blockwise(A: torch.Tensor, quant_state=None, absmax=None, code=None, out=None, blocksize: int = 4096, nested=False)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L802)

Dequantize a tensor in blocks of values.

The input tensor is dequantized by dividing it into blocks of `blocksize` values. The absolute maximum value within these blocks is used for scaling the non-linear dequantization.

**Parameters:**

* **A** (`torch.Tensor`) — The quantized input tensor.
* **quant_state** (`QuantState`, *optional*) — The quantization state as returned by `quantize_blockwise`. Required if `absmax` is not provided.
* **absmax** (`torch.Tensor`, *optional*) — A tensor containing the scaling values. Required if `quant_state` is not provided and ignored otherwise.
* **code** (`torch.Tensor`, *optional*) — A mapping describing the low-bit data type. Defaults to a signed 8-bit dynamic type. For more details, see [8-Bit Approximations for Parallelism in Deep Learning](https://arxiv.org/abs/1511.04561). Ignored when `quant_state` is provided.
* **out** (`torch.Tensor`, *optional*) — A tensor to use to store the result.
* **blocksize** (`int`, *optional*) — The size of the blocks. Defaults to 4096. Valid values are 64, 128, 256, 512, 1024, 2048, and 4096. Ignored when `quant_state` is provided.

**Returns:** `torch.Tensor` — The dequantized tensor. The datatype is indicated by `quant_state.dtype` and defaults to `torch.float32`.

**Raises:**

* `ValueError` — Raised when the input data type is not supported.
### bitsandbytes.functional.quantize_blockwise

```python
quantize_blockwise(A: torch.Tensor, code=None, absmax=None, out=None, blocksize=4096, nested=False)
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L693)

Quantize a tensor in blocks of values.

The input tensor is quantized by dividing it into blocks of `blocksize` values. The absolute maximum value within these blocks is calculated for scaling the non-linear quantization.

**Parameters:**

* **A** (`torch.Tensor`) — The input tensor. Supports `float16`, `bfloat16`, or `float32` datatypes.
* **code** (`torch.Tensor`, *optional*) — A mapping describing the low-bit data type. Defaults to a signed 8-bit dynamic type. For more details, see [8-Bit Approximations for Parallelism in Deep Learning](https://arxiv.org/abs/1511.04561).
* **absmax** (`torch.Tensor`, *optional*) — A tensor to use to store the absmax values.
* **out** (`torch.Tensor`, *optional*) — A tensor to use to store the result.
* **blocksize** (`int`, *optional*) — The size of the blocks. Defaults to 4096. Valid values are 64, 128, 256, 512, 1024, 2048, and 4096.
* **nested** (`bool`, *optional*) — Whether to additionally quantize the absmax values. Defaults to False.

**Returns:** `Tuple[torch.Tensor, QuantState]` — A tuple containing the quantization results:

* `torch.Tensor`: The quantized tensor.
* `QuantState`: The state object used to undo the quantization.

**Raises:**

* `ValueError` — Raised when the input data type is not supported.
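A hedged round-trip sketch for dynamic 8-bit blockwise quantization (the buffer size and device are illustrative):

```python
import torch
import bitsandbytes.functional as F

# For example, an optimizer-state-sized fp32 buffer
g = torch.randn(16384, dtype=torch.float32, device="cuda")

g8, state = F.quantize_blockwise(g, blocksize=4096)    # 8-bit codes + QuantState
g_hat = F.dequantize_blockwise(g8, quant_state=state)  # back to torch.float32

print((g - g_hat).abs().mean())  # blockwise quantization error
```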
## Utility

### bitsandbytes.functional.get_ptr

```python
get_ptr(A: Optional[torch.Tensor])
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L490)

Gets the memory address of the first element of a tensor.

**Parameters:**

* **A** (`Optional[Tensor]`) — A PyTorch tensor.

**Returns:** `Optional[ct.c_void_p]` — A pointer to the underlying tensor data.
### bitsandbytes.functional.is_on_gpu

```python
is_on_gpu(tensors: Iterable[Optional[torch.Tensor]])
```

[source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1532/bitsandbytes/functional.py#L444)

Verifies that the input tensors are all on the same device.

An input tensor may also be marked as `paged`, in which case the device placement is ignored.

**Parameters:**

* **tensors** (`Iterable[Optional[torch.Tensor]]`) — A list of tensors to verify.

**Returns:** `Literal[True]`

**Raises:**

* `RuntimeError` — Raised when the verification fails.
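A short sketch of the utility helpers (tensor sizes and device are illustrative):

```python
import torch
import bitsandbytes.functional as F

a = torch.zeros(4, device="cuda")
b = torch.zeros(4, device="cuda")

F.is_on_gpu([a, b])  # returns True; raises RuntimeError on a device mismatch
ptr = F.get_ptr(a)   # ctypes void pointer to the first element (None for a None input)
print(ptr)
```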