# AdaGrad

[AdaGrad (Adaptive Gradient)](https://jmlr.org/papers/v12/duchi11a.html) is an adaptive learning rate optimizer. AdaGrad keeps a running sum of the squared past gradients for each parameter and uses it to scale that parameter's learning rate. The effective step size is therefore automatically smaller for parameters with large accumulated gradients and larger for parameters with small ones, eliminating the need to tune the learning rate manually.
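Concretely, each parameter carries an accumulator of squared gradients, and the step is divided by its square root. A minimal sketch of one update (the helper `adagrad_step` is ours for illustration, not a bitsandbytes API):

```python
import torch

def adagrad_step(param, grad, accumulator, lr=1e-2, eps=1e-10):
    """One AdaGrad update for a single parameter tensor (illustrative)."""
    accumulator += grad * grad                       # running sum of squared gradients
    param -= lr * grad / (accumulator.sqrt() + eps)  # eps guards against division by zero
    return param, accumulator
```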
## Adagrad

`class bitsandbytes.optim.Adagrad(params, lr=0.01, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True)`

[Source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1137/bitsandbytes/optim/adagrad.py#L8)

Base Adagrad optimizer.

**Parameters:**

- **params** (`torch.tensor`): The input parameters to optimize.
- **lr** (`float`, defaults to 1e-2): The learning rate.
- **lr_decay** (`int`, defaults to 0): The learning rate decay.
- **weight_decay** (`float`, defaults to 0.0): The weight decay value for the optimizer.
- **initial_accumulator_value** (`int`, defaults to 0): The initial value of the squared-gradient accumulator.
- **eps** (`float`, defaults to 1e-10): A small value added to the denominator to prevent division by zero.
- **optim_bits** (`int`, defaults to 32): The number of bits used for the optimizer state.
- **args** (`object`, defaults to `None`): An object with additional arguments.
- **min_8bit_size** (`int`, defaults to 4096): The minimum number of elements a parameter tensor must have to use 8-bit optimization.
- **percentile_clipping** (`int`, defaults to 100): Adapts the clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at the given percentile to improve stability.
- **block_wise** (`bool`, defaults to `True`): Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.
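A minimal usage sketch, assuming bitsandbytes is installed with a supported GPU backend (the model and data shapes are placeholders):

```python
import torch
import bitsandbytes as bnb

model = torch.nn.Linear(64, 10).cuda()

# With the default optim_bits=32 this behaves like standard Adagrad;
# optim_bits=8 switches the optimizer state to 8-bit storage.
optimizer = bnb.optim.Adagrad(model.parameters(), lr=1e-2, weight_decay=1e-4)

inputs = torch.randn(8, 64, device="cuda")
targets = torch.randint(0, 10, (8,), device="cuda")
loss = torch.nn.functional.cross_entropy(model(inputs), targets)
loss.backward()
optimizer.step()
optimizer.zero_grad()
```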
## Adagrad8bit

`class bitsandbytes.optim.Adagrad8bit(params, lr=0.01, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10, optim_bits=8, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True)`

[Source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1137/bitsandbytes/optim/adagrad.py#L75)

8-bit Adagrad optimizer.

Adagrad8bit accepts the same parameters as `Adagrad` above, except that `optim_bits` defaults to 8, so the optimizer state is stored in 8 bits.
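The 8-bit variant is a drop-in swap. A sketch showing the stability-related arguments at their defaults (the layer size is a placeholder chosen to exceed `min_8bit_size`):

```python
import torch
import bitsandbytes as bnb

# 8-bit optimizer state assumes a CUDA device.
model = torch.nn.Linear(4096, 4096).cuda()

# Parameters with fewer than min_8bit_size elements keep 32-bit state;
# block-wise quantization and percentile clipping improve stability.
optimizer = bnb.optim.Adagrad8bit(
    model.parameters(),
    lr=1e-2,
    min_8bit_size=4096,
    percentile_clipping=100,
    block_wise=True,
)
```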
## Adagrad32bit

`class bitsandbytes.optim.Adagrad32bit(params, lr=0.01, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True)`

[Source](https://github.com/bitsandbytes-foundation/bitsandbytes/blob/vr_1137/bitsandbytes/optim/adagrad.py#L143)

32-bit Adagrad optimizer.

Adagrad32bit accepts the same parameters as `Adagrad` above; `optim_bits` defaults to 32, so the optimizer state is kept in full 32-bit precision.
