From 5922349bb7119b8afed9182212cab65066ec6234 Mon Sep 17 00:00:00 2001 From: TensorNull <129579691+TensorNull@users.noreply.github.com> Date: Wed, 17 Sep 2025 05:38:49 +0800 Subject: [PATCH] =?UTF-8?q?feat:=20Implement=20CometAPI=20integration=20fo?= =?UTF-8?q?r=20chat=20completions=20and=20model=20m=E2=80=A6=20(#4379)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Implement CometAPI integration for chat completions and model management - Added CometApiLLM class for handling chat completions using CometAPI. - Implemented model synchronization and caching mechanisms. - Introduced streaming support for chat responses with timeout handling. - Created CometApiProvider class for agent interactions with CometAPI. - Enhanced error handling and logging throughout the integration. - Established a structure for managing function calls and completions. * linting --------- Co-authored-by: timothycarambat --- README.md | 2 +- docker/.env.example | 5 + .../LLMSelection/CometApiLLMOptions/index.jsx | 155 +++++++ frontend/src/media/llmprovider/cometapi.png | Bin 0 -> 23847 bytes .../GeneralSettings/LLMPreference/index.jsx | 44 +- .../Steps/DataHandling/index.jsx | 9 + .../Steps/LLMPreference/index.jsx | 9 + .../AgentConfig/AgentLLMSelection/index.jsx | 6 +- locales/README.ja-JP.md | 1 + locales/README.zh-CN.md | 5 +- server/.env.example | 8 +- server/models/systemSettings.js | 5 + server/storage/models/.gitignore | 3 +- .../utils/AiProviders/cometapi/constants.js | 39 ++ server/utils/AiProviders/cometapi/index.js | 433 ++++++++++++++++++ server/utils/agents/aibitat/index.js | 2 + .../agents/aibitat/providers/ai-provider.js | 8 + .../agents/aibitat/providers/cometapi.js | 115 +++++ .../utils/agents/aibitat/providers/index.js | 2 + server/utils/agents/index.js | 7 + server/utils/helpers/customModels.js | 18 + server/utils/helpers/index.js | 8 + server/utils/helpers/updateENV.js | 15 + 23 files changed, 872 insertions(+), 27 
deletions(-) create mode 100644 frontend/src/components/LLMSelection/CometApiLLMOptions/index.jsx create mode 100644 frontend/src/media/llmprovider/cometapi.png create mode 100644 server/utils/AiProviders/cometapi/constants.js create mode 100644 server/utils/AiProviders/cometapi/index.js create mode 100644 server/utils/agents/aibitat/providers/cometapi.js diff --git a/README.md b/README.md index 90243c35..88922e65 100644 --- a/README.md +++ b/README.md @@ -102,7 +102,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace - [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link) - [PPIO](https://ppinfra.com?utm_source=github_anything-llm) - [Moonshot AI](https://www.moonshot.ai/) - +- [CometAPI (chat models)](https://api.cometapi.com/) **Embedder models:** - [AnythingLLM Native Embedder](/server/storage/models/README.md) (default) diff --git a/docker/.env.example b/docker/.env.example index dca22fa0..bd268053 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -96,6 +96,11 @@ GID='1000' # NOVITA_LLM_API_KEY='your-novita-api-key-here' check on https://novita.ai/settings/key-management # NOVITA_LLM_MODEL_PREF='deepseek/deepseek-r1' +# LLM_PROVIDER='cometapi' +# COMETAPI_LLM_API_KEY='your-cometapi-api-key-here' # Get one at https://api.cometapi.com/console/token +# COMETAPI_LLM_MODEL_PREF='gpt-5-mini' +# COMETAPI_LLM_TIMEOUT_MS=500 # Optional; stream idle timeout in ms (min 500ms) + # LLM_PROVIDER='cohere' # COHERE_API_KEY= # COHERE_MODEL_PREF='command-r' diff --git a/frontend/src/components/LLMSelection/CometApiLLMOptions/index.jsx b/frontend/src/components/LLMSelection/CometApiLLMOptions/index.jsx new file mode 100644 index 00000000..71fbeec6 --- /dev/null +++ b/frontend/src/components/LLMSelection/CometApiLLMOptions/index.jsx @@ -0,0 +1,155 @@ +import System from "@/models/system"; +import { CaretDown, CaretUp } from 
"@phosphor-icons/react"; +import { useState, useEffect } from "react"; + +export default function CometApiLLMOptions({ settings }) { + return ( +
+
+
+ + +
+ {!settings?.credentialsOnly && ( + + )} +
+ +
+ ); +} + +function AdvancedControls({ settings }) { + const [showAdvancedControls, setShowAdvancedControls] = useState(false); + + return ( +
+
+ +
+ +
+ ); +} + +function CometApiModelSelection({ settings }) { + // TODO: For now, CometAPI models list is noisy; show a flat, deduped list without grouping. + // Revisit after CometAPI model list API provides better categorization/metadata. + const [models, setModels] = useState([]); + const [loading, setLoading] = useState(true); + + useEffect(() => { + async function findCustomModels() { + setLoading(true); + const { models: fetched = [] } = await System.customModels("cometapi"); + if (fetched?.length > 0) { + // De-duplicate by id (case-insensitive) and sort by name for readability + const seen = new Set(); + const unique = []; + for (const m of fetched) { + const key = String(m.id || m.name || "").toLowerCase(); + if (!seen.has(key)) { + seen.add(key); + unique.push(m); + } + } + unique.sort((a, b) => + String(a.name || a.id).localeCompare(String(b.name || b.id)) + ); + setModels(unique); + } else { + setModels([]); + } + setLoading(false); + } + findCustomModels(); + }, []); + + if (loading || models.length === 0) { + return ( +
+ + +
+ ); + } + + return ( +
+ + + + {models.map((model) => ( + + ))} + +

+ You can type the model id directly or pick from suggestions. +

+
+ ); +} diff --git a/frontend/src/media/llmprovider/cometapi.png b/frontend/src/media/llmprovider/cometapi.png new file mode 100644 index 0000000000000000000000000000000000000000..40d139ded573867fddb6da3c46630384f028fd2b GIT binary patch literal 23847 zcmdqIWmH_8gjydL#KKh7MRhGp>BSm}m>=~xKoRs>rXU`!|zbMGS zC%zy;B;X%ZCplf$XV0*DpMIaGvS5)tdxltNtEuCrqogQk?&ttEwQw}E1baC+0i&Ni z6BhGwGBvlebfY%2w6=wcFrBn@GEv)Fh%jmMD7{f~lC-q3mGg12)bLT(H21MH7qDOw z6GapD5(Fl2uyiw}_HwX?x(a%UF#R*HAn^aEkJ*`kK`s_ng6dK-{~iKdi7?r?xj6~4 zvwM1af<3vwjxN^h90CFY>~A>PIXT&Y5p1sBP&ZRAHmEBdFasL(KXXV~x|+M#I=R_8 zLaCqTG&OT{cN1Y^LZklo)Z%V#F1BX>nH}m1HvQ)l_orLHz@phrEnL|-z;B*ji<(;a zU*iSEEnJ^2|Md_JOXq(s{&i(<`_Ge{OkG?pHNBlIMVQnrT^-$B%q{FdF#G@U)zdTn>jr5rH+c(DesewxQwx4eHY-a39yV?ZJ}WkU zQ!aBhOHNaME^bRJE(>m+|4j6+TmI)XGG@RmJe)k7ygULN+#GxYJp6C|bHhJh{?GB6 zj_&3*PfHQy{Lire`s|;v!t77dwYUA3Q2x32w^aUf$G=4OuYvzpFDhvY^)RJoR=0#&Sh`rcfT%4T%|)cdWt<(nc{rS*@2K5fEnP(3 ziA(XB@v2KnX-iX^xtKyNM8u)qZZ@`1YehvRYE4U12VwUAbLRi+YW}rAz$^d;g8e^c z1i1K*Ap%}RgvkXkGauc9C!RgiA(NL9*YwIc{JxV;KkHq7yYzKWsRcZ{^1{YT_8B53 ziW7=Yq|D?`xnd5%wtDN%lg`@JD;w>F)J$BEto#L{cmg8o3E7dqr+2z2*TXWl8q$YX zLHkiZKZ9NKT}j_w+gey#%RH=quetp$0RgEoze4K^FIwK`D^O*Al}*8rg7#6O77fBo zfcDXhAVu{x@bzovLQUpZQSVeGDu6MS1)=JNs?1vAsHGBxs$YWfARi?Pb&&pleWP>b zYLG;&5u*Ae+B(%4V?gIKb-_&xObz^q%Ycw5wh%l>Uz!xeiB7&RKEqWa>q7~p6n|k! 
z2WwmaeFZY^JN8yk;jd8YO7x*L83?`whWIGXrdw<~h?$u5F->OTRf#*M5@;_Dkf@mk zZDgM+`0vkH`g7&pFJ!g9KWer|=;3f|NGUc*eh!f8i?=~aVqh=Y9#|)qZx+am!9{U< zF7b9{oH;YNsA9tU{e>ZoO0q{-W4UvSqx znCrfEe_qDOoIz?1ihZG*{6aSu9@8xcF`t_uRwhYd!_<#P!cVb*x&vy%i#bAFwBCoi zmV)n6w+)w>-mzHTfUwXW@Vwpmj|mnS*lMAdJ}8^Gj)b#7YMjx_6FJ*-=%I zfH>LclEzUPqKyV~$ecePpIh~99O-qXdc5U|aH5SQf;_M}rII4p8zvaV@n2iI9fPx$ z^c(qexaLwPNgCWIPolXA^j|QfL}5xpPD@Jm`v&&VM}qh&McLWC4IEzndWg_scK(rF zsp+W2-8Dl~X_r!^!+pY{EBwpa`Hl6P98Tse1P$hiB{V?u?NJg_yl`2)ac6~|*Jump zWqS%3UT1pJ?Td#})t2_bdvJnE`}zvS^*5@MU`)vBOZuGixg5-yq>W%rjdMmBU{ehv zOL}8XV9~_``6B|td?zlY1nTe&kGzot$0cTN>MHa*)>wa%kb##DqFffP+}4xUwmSPd z9UwJ{id_L`yZByI(@}Kq^8NEEfpDU9gEAqSXrs_JrQ(5X>1U0MyG@8~T*&bki^44| zeGc9s0d}P~#jjlJfpa>cL5r$Puj?eaQgJKHwR5_kG_a-B)lPQwX%}wAfrzmn_|g>_ zJB>|L#KyJAhSTDpbHc2o*&SH*MQF1FQ(lyS?)mM6VtD>Yi-A3orZ7VSUkEawVK3{E zRWRoLoViCFypv-J`;<6$gWgUq(_&60_E6Gf%f)IjtMO1qN?Q;*UE`! z3iyX3#KM9m@LrRs_)GnmGI!Q#^@cUzzAu@n<{m;R?Twr8;(7ik;aL#_ zS(kI76hzfV?zk;XQGtZ@y|q`KuasP53=Y+NS~#BWB@s(fbeG7>^}IPw=|NWjyB=Gm z4-jj-?fx~myDl2S3IoJi%P3g5Ir4b`yG-a| z$}L=BzV>0y zav|Z)-<(^aWV}jr5tz(w)aEvabv!bYf|8A+POtxX=M(4`q_V{p6(;hmXC4H@Yb5H- z0vU1SHt)YInY}FfOf`W)A4}c%zVaTF1PP__tVFVdQTGjxE?&htQ zUf@c#&%@;<;cBi&hR2WVGw*{BGzIqW0rkVOz&vKl7M`dJ@tygHr^&kEx}N@714y;{%G|@Sm;_qU?^=wxj{~ zo@R&lLy^`?#?@Z!`sb@;YX&tx(rMOtivSI}Sl2!!y7g#3{{7d$(eLx-tOh} z7#rA~L;8vvvFIWYO!_0Ih`x8g5yu^Eg?UZRsje-&%?8U(JEA&i1>U!0tn=9*;*O2BJalR= zKD?hOo!WTA=`r#lKz~DLbltUwR|TtdE}cj20KuG0F&Ab1yI>jPAQ@?!eeTO)UTwYS zV?PBSY-ZMg=S*Pw?k|BqNVBiEc)#RJAtv=}UoA6%{)zCbFQ$bc;p{AAeb|((#|rc) z?Nd+yUs5vBk65PXETS{!lPSYt;@Qi#xkV%=6Dy-~UDz8dKclOHsD8TLz+#5CJnTs) zhvW60c}qGK*_;jB*ZFy9MxxNB_Xk|Tf2e-1HQp33G!vIPYfjAPW;XANVJ6_E=3j2Fg7aiFh9-9&gRqL+g!s~ir%jw7>}jn=P%qt zfs|}0v}8Rog9IF*Hg&;)bH?jbr4Fv?HEncCF6hNdBMh3*tD))mP6P*jeR{v4%Xv5Y zxYQ<&x`ic!kjuQz^dyyUedy6fV=hU-H`{)n#@4U3W=E?7DS+VMx^mZ9!EVkPh(xHPedom}6Y7xU9ZNod; zXV$-!u0)npb=&CeOof0tMDw<*i~0(xFvvwwXx0;dZg?jb;g&P~v3*U6n|pXs=J@Tq zAI9IQVI}(0is#uy6}>KO%!NxI0*~1GM~3*E3EEOrQBjjb9?gUC 
zk)lwS@Y%6DE-IDeG?g&(!n@u%UUW96#F&;U@EDgERBF z6Sp(5ynst56?g1CYw=FcsZitOq1@$avai{fDX!k6kYQ%fUZnZ^RoGbr-d0opzALB- z-j~Sz*Hk6U7ea`nq4bfyZyfPuo#3NZSZ&Ivv7sZp-aGx45+k173s)@5d2layIa9a? zL?M~mtt}1g6K%?y9jS*Ni*Rcn`Xs>kxG(}H0X_o9kcB3KLQ?+jFVJnBjeUqCQrknk z8NSu#i}}>>qWul6<@*$MVA%Lgji;D+Heh5D!Na^>)sp z(}OgMGbTCQKfE>B3pQJzwmx{Jf&79bKQ5KKR+Zg|Rml8I>HC68E7wP53xHi5N1?A_?NfehoraFM zoG`f#i?4~bz8SK~lGLSe$UWh1d5D*T2nO_8ESnc?I~T7zRjqUCUT0MM{`@6UMn*ZU z97NQgX4455Wa&J8ek-69RDpiS$w=p&oEY4vGht^5gW3KzYG{ZgNX>qS2Cf_K8+)L^StOvfk(J! z;fg0dg(X`pOP^X#9`%OCi9tH#lyc7Y+F|%TBv{xb^M-XKEP6TzI^sqZQB)(@5AYRM zQ~KLB7*@KzxOO=LQ&PJ+{N^6U3xv+a-Y+D-G5JxypYhGcmv5fHx*0xj_;UeChAGdz zyX3Ay7oD+&%ih;Lv>Af#sxFV>FM1+wwYs*D!t2i|Rx<2eTw42XZ?V|eX(MPKRIA$- zTc2^Wfg3WiWH_o^y{JbhdPt7R#+(Gt0cpj=Ac zsh(h!eK~butz>WbWw>@Jntq4u`D&icxhK9uHE>G6uZ2NMPTCWjfBDmPl0zz*NxpkJ zdg>R%tt`Gm^-}BYv+C+pbmw2aXv33N2no?mu8A+d{h9C`O1p=Id(M_0jgG}`P<8(E z_VvkVgKaAfl-PgH*1IPWFNgXED(${{H+u3bgF=a9CnGF^02&3$7UdLv!fj*vzQ}qrDM)r8MnUVBv4-s)x zwR4}(7GjaFW43N`@gIe~OL}!C80{{vy3O7PhvCJevS8G! 
zxxCR5{Vu?(`bTy$+cqiXs1JBomnj(Kz49OnUlyRd?M~mii5w3$3Pl8DqIp5Zo?J7n zI5DN}9h_-aE73Qj7EJJzR7Aq%h``KaC+f;6z8u=htK;@?ydeiUX26X94(Gxm%6=a3 zsJo_YHh!Nl?mHa}LnGunaK93QpUs`(t)H;2>* z{f2jz{IbW$!SLG+|0RK>(u`w6mQ)a;=PSiOPAX}(Oq<&1P&0Nl;~(>3V=6o3Q1%RR z7&Ec(-m!<0%4nW<|J`DBY9C4IPzAcl&fNfYx<5O=XX-c zX+%)(4}M~ap{@95qi)%ep9cU1{R{7)5%vBm7dZ2Fwv|0AVyo=J7sB}WzFU~b_)M>_ zN8v_XtKExWaNy1X2!@f&y~XdaWKp+x%`E-*VR~Jz=in*jvz*k@ZU>~~)z#d!<0R=; zS@X!m6n^pEE$MKvK<_kbux3lc?)Y9e|CW^{gUg@3Ez&9Xe6D(mj)2)`FYh(xWUf=4 zX{fDjgqN)0v2gC#t&djnKCE;m3#hlFn&E&4^&KWmgJ``jTvC<;Wje2Ksn=*F-7D-P zTv=muaA&PeNrE70yYg_xDEd_8E<1dsKX;b`0Ra{L_B{iuU2oK%g;B;+leYGUTdoCf zO$@$C649Spbfa6zwN>+cK`11;+Uy$CHc)3#bsWE*6%-7*S#pXZ%MFU6;+T+cRBFeU zH!^VZ4(K*xb@=oH3s8E@Brn#Z<<_w=B7^Om&23Z$jg5)t*)=g`#LAatg~BoP(MLhn zquMuD`sfd5F0rj-)xK@WUi>d5UJB7;l5?9(*qoMBA^*r!$AdQ6j80;e=`#Ty3_M)SYKe>fh z@}TFVn#o+Q6lL#?v`3B;L-$$-I&<{Q063&mMDRyBcIxi1TcD6$*6-0Gn6~Vcfemub z=le*?jKa{_*OWEdBFe9n!ZdtlPaIf^LtV0eOP?x_Z0wvnk6BET-I~n&cI^RCGV5oE zeDZ>jsLTnwb`0frG<48{B8mrcs$4DJjQDTpLa9bwzKZcS%%wIl*KoKbza2pLw&)zt zUrtXv*tURu6}9;lEZuT*FJ9^yVNG$lIOk@>nA~>(K8sLU{8@6(<}$ zS5JuKVT#K;Ogk(O(+92xj7hRcOQsZZ~ z8s>oleJe?^L=eJ7B+6y&-#-n)`53d>{ffISMoM!Fs$i(JMjb0&HO0Rpdl>Fog~RM5 z0Bpg}fpuhBt4n1kM~Y3dFf{pHJP>9cgEKMa?&8>jSPGYrLpeQO`g~unPjT9vo%}5| zvH65U?1(+c`P56P*Uh1MZA+wf2AIbY)$q(n)4OAyHFg6|kWx%_bHd;Im}HnEgKHJP zABtog(R@=(_r`4yK*Ers!>_l!-Hbo@R{y%`+Y)qC=FWtA-PkP^=y*y5xR)x^caL+6 zIv2bRoRQ19!U?KEJ!aDtZW-QoVfk(0a3t#_Z+~__nKWP?A{u{UG6n&RbkNlqR!%Yx z`^4Xx=P%OwlO;iv2Tg+>^Jw&?C3|5Hckgrmo=oc+wa#>*`miUJf#+7!*OOmH`yK0K z0`D@P$~8Mng=eD6j}8j)9dIS5P;_4PinC__b<;i1pgh)STLu3x?IaoCToG1=de+!) 
zevfWZwdpNh+dh1+5dvDNzB8~#ZonhwX-8S9zHK*xdw;c0UMV~X1J21)N^4Yerzikh z5NE|aX4MlGwc(nx1VbR7+GyB~98Py?exB{eUgF9x;s`ejjisOL?nF0T)|T6wS0(F@ z$rhpx&Ld+Vjkp$w_0<~McDTAE8XHz{85-+P3}Dg*_?Pxqe-Wp#JpFxaFq?Y%NJ2+A zoHeGPU>BGm(@StVnJ4Sljd0*s zn$eEmzjd*LXKk);nyXNE$Fls+-X_-M?e5e*BFUT)-m%S1)d%C4?34C*N3)ZknA1}U z75=?3_!x~os+8>`SFjfPu<4iT(qE7=DZra@40%veatfSZkEF@2#}rd59E+w04ycjt z*<3ykvyG7B7d7B@7*pVb^ZGpTksb-3CLK_`3hd$V=h*?3LimWFwfB8%;npm|zJ(GZ zrlI$yHV7VDN_oQS+bWcl+gFApXN@cFzoyX<3+#fwRGp9K3HLROzMa{0Ts8WAXmJ01 z%c_#*LrdA%^_V8{sD8>+2*%czUM&ZSE;+rOH6R5YZEJ66@KBj_nt!;>Z5=EBo{&T1 zvBb$~iCy;*P7BNCi9e5}p7443{6e3U>Lk7x1_cLz7}(f1=h1lryc&OKis*J+6f>7& z=J6YB2l`ut-VeCG(pcuRy(WDQ1Yts9^$%;0k`EJ)A>@ZDrd8c8pa}Ze*I0j@Ts{~Z zRZpEtB;DB!e57b@vw-gvzCz;C<=&NYs@rxhqx>3Mt|az~Aw~ZcDu3bTN9lA0RL^o^ z1ko8s0C7y>X~l(kwXH?Z?)&}yiAO{vD8kb;j(A~>ktez`5a??|#1zJ*<8yL2H1om` zMdplgwb?G=@7?&DfiO-xP6Fo_FxVdNs<&sN2A9JUc$=ArhPYu}_Y_D)OJ0U^#RfH1 z!beLTHVs`g0N-B}s?NC+)^2!-&-2ReEo{=OWYsKirXU-ux!f63&tTG((Bl*8x?E}=P{eK_T}ZxAsO1VouAh!_8m^` z@$5@8vLJ`03t6@g^I-$^M4#K;sy-}!OsvIulQr4WY0r_TY}G*yXqdDJwQ&h@74EBS>aY`z6cOjLhs-LIUbCt_6w-^reNL|ggd zeD2SB8XoMr*e=*y(ze6Tmnt5>K8$+i`I1h7UP*4Fh2$>!i&AEmzBMk@IFH{?~Z@1giok@3HAuHUU_ zaDuX6&_`*$O$jcO8B9-2&5BSL^vOtiN6m*`>6{o!p`V9iq;rNBy&v&_Ob0$Z>|RFp zy~;(u^>?9&_n+lj&SE4xg=qR-^MulrZn}FkzqKn-WY=w_K)_8`5~Ld&e0C1#aGk1> zp^_TK0m1;l=HM?s6rT@Ahg-b89kjS~@LE zW@2Zl`Pn`L&mK(gOPLUK*YB%&8TIaflw922EGK`o%1-?g)BU(1(2sVN)h?!Q)H~n8 zYi%Xug(?j}0V+*5I-N?4-BM{Oe_yA_dz}4~`qJ!+_nxDJxu6-RbC}`hjC3H8iA1}P zS@^z6;6~r<>|cm6L=IS(C4&wVd9dt0PVRmMUcetF8*hka9T22zSMxdHiIyNH@NfeK zlB*K;L!Qq;^m&s4vlvfBHKN26))riL_7+ygCGxZ>9-!`JNZ`Cgy$*Pn2R~~H)?bKi z!@L{PZRIs>x}Z`_(SU-tpzpl+HGLjkd-GWJO8H;W3twjhFGewv_#bfs6*diJslMb; zp@MvMKstpK5J0m=8Qjni28n|gqZvPfU(*9lV$&^qK5SNF13e->^pT{PX%VFfK|oo0+r~%i%+6rs*witHsB{Y-4CWc3BGV$y4}E8S5fE4 zV1Cq--`Z{K)h{5W$b{CWd<{P@#_7Gc;Y2N$X(TNJ6Axt`g68f12#s<38MSJiyq3-3 z;06%DNz~--5o(}J=^?e{Yi*FIpJOu*+Kr7;p)*A^wGDo*VBIx|$Bu-wJ3ZH-3&ZZ`MGQw)e>Ia){)PYf$*9{WPYy`N)+eY 
zRsT35op*FhOJjsOf)d~FpZ3-xmCmawGDI<@mr}TvOew6*G%Hw6!rBrok%_$n*&6Qo zk;K2P|=83&br39MCwULUtH}p za>0PJm)iX}K!M~7qC`tw98d#aTyarC-C~r1WO&jA@SfEDD&?<|O0jbQ8_}RJDC7ox zEQ_E0NYf)*yT0rHzH!7tJ2&GBbN^KXxQ#2fEO;ry$2paL`Iu79=Hp^#JYY{5C5CS1 z{A>4I>$W$T3m*U%*sCy0`jf|>aPfJ*kk)o%Aw~=MDtQ$`3$ksYAtE$uYp9B#K@_0V z#om7MR*bZ?dA;8C+ZyM^4u4aWC2;z1=l(uUVx(_Vbhyb<#ZxhJCUA^5&hc^%#Qz0P zU2gg(WZ{&=SOg@ogmOa3Rn82Q58hZXUE|CF44x$^L_F{}1S6eOpNuUPhChcSj}jlsH6%Zvs*Z-%ag);of~9q zT;=Rs+jD!%n=iI90JzjJm+i86aE~14?5Woum)NXi17=Jm)vxY95z&8Wozi_m!VUOw zAQ^GA8vL{oI5aIQs9xo1e#<@K6M$vNEvU4-1ZkM_J9-ebT#%wRf7q1i+hRA$-T+*c zn|*m@AeT-z%!M(n(BX50ECePsy|I)2 z`B&SLh{b-edRbesCpE)Sr~t%&Uelb*zvJKo-jI@nNDTihO7pM%$4RgXB+Kw>p_Dau zEjZPSV_s$)PFJl-{%0*02|(507jV%$VT0{KlPI&HId1QUMk;5Y>XG4DGJeC>7@gLB zlbPOQGo)Jl9~MlC+<93ueJ&hKaH>uQRf!h)r#OH4w4yr1x0&omJN}w9T*^7gPSDb3 z*grb^J`ROW$Npuz&v#htdve1+$B_X*q*jo^3%zv~PD-*PNY6L?LSv^108tY!WPM7<-J>*`sl!9E;#~^2k4j7^V*CtCAw~w^IOB00@Dh-RZf)GJ*3m z49qA{h#6!+j(AVVv>k0;L1&PIXYiU@wsl@j!aF(gtaWTl>nrqoa_(EsJt^{d z>f{13ef;3|DlR?l7QKnxFHIbl*b!-b1bJDuT#KgwptZJVZV4!GuMWqGU$fdfKhVaH zsrh-{RiW>_QX>cA+!gh07Rel!y|ze9Nk0YD%d4F1Ja+b_j8 z4Ro+rk02uH5_ge#_~NfQZDsh3g`EP*7Fqy(CiLbP`*gFB^Akk96Mqfw%6o7LV84xu z$!3V51+xo2OtmQhDcMSLYP*2K#-K@-BGDi3qc>(lsQ8&R#0V~}jqPUiaQ!(v=EC;A zP&@!)$#HaRIyU9WdCPeq1-s`$Dp88<39)r)95o|SnrwOtYwQH6oMRi4j7F6MlTfQ%vd~o>cSKQ{=&|;`k-`kl zo^RXB`tx*sxM}~SfT}Wwp5jU*4n%%BKEpkEi&-P9V0Eck_@qh-b&E{`PYCJzyUUjG z)7j;lU(o=UA>%Og5Jp^rzhuygb775%by@Cp6c*Qp6u)}Hl;nJ}b=!iaold@zV-MfI zWW+uhas7Kt4j}IU&>KdWt-NHfuSU-h$KImn>~#eohQ7WZ_?YxXU<%da`5eu5tO_f< zc+oBR5UAUaUSw8pNpsdJCOt^wv?`$333#77HVvk%` zxW>lnUyoxk07?K5Gl(0Kzst-_0<-tXb*~R4)gNg+lfr{)8kD^Uj~Q1x15h1+g2rK# z&9A1LHJD7~^Q;bAO0dQOvDWNGi)nv;{TG{%0n4Z#hA+SJxld`EOqwO3{oVPeTeWpm3Q2sxqaKrvxVV%l zeXAEK8oH*7wBAS;d>>SZ=*IEK))USQ|ru_O` zqy+0M@njW?nKO!dgsZE=>A-w11npiyo^QtTXZ<8z?BqcK)E|!)^&C0~l!kKJoF5~g z^er=JnY*Fn?1NI?5SoG<4zGu|iB$PO^l>3-w!kV!;+TISA-@={V}q2FXIpIdw{Z|( 
zUY_^^m9&&0X@cLYte{7K5KwJkGH1{ap@}Sp4YP@XH^0C~&nUc7lH$~S@|sxGx8ZSNRB^!>N;A)*=z7GLWK)Upkgwr_J*_Zf)sr0qn;(TxgVMDso(@_=vVHe@*YF^m+Q?J*A@*yGOn~l(%FTRms z zu5$)86z@W1C?}&NO4pZ6inb(w7vL9|DEXp-0^iV{I}90sB>mRBsEU!=DmS1b={Z#| z4HRwmtZjouNOA!l(l(ZWM+n{!W+{Q(N;Q(AJ8dwSm)1i8k+nH#hsdxjXEU+t8DQI) zun4^^j6Zz&-r_N3PXsU&u#aHpgB=KIc}rgBI|5Lm{9I0%}>o@^%p5%cU0s zA_f3>6^bSlIywZjD8vRismV^1YqX!(tX~8eN;^ab$}|2JL~2`* zw(*VoXkq^^@!z*Ju~)Z7e@r6DhEF~`#Zq&*OqPt)6yPDZ{JiQaFxT$htSE+qvs@NbZ-;njgWyFY zPbHIzVoS*0W`8@*wUQAO31AF6l~E>`G>}Pl)wXy0Te^COd#*RtTdGO4jLM-ZiLJ** z$!-zGu&!XZi>=S1Q`UgJz0}S+AkQ$nyZ^ibF!Lbxl@(bI%fqh?fiB$7d%Tm`S0@I5 z0={{}_+zgIK%s5eJR9~-SN;)Ys7E9`ZtbixVOFkOc|6{E)nB{*I-rBZzSaC1&ADiG z$NC&=cJaAsy6kC8h4ZydnpnGIMEnW6Dukuh08{C$fWs*e&31U~Jd=INWq48)NuT3@ zDBOwD^nsxp=cy9=cHWG;>Lg?ScvS{W(Xdf_twe>ijA6*~6Q4ZS8%bA8jsGn*cSBiur7nio)9wTe;~&Hj%thh1Y}IC_u5A zzes%sP;ap>b;(Cb66S~k4}cOag)LGNKr^q8eP(^`hUCwT=j9CB2%o}|OdDN^nV808 zLEN;D9k?P2@`~C4hz-H@8N_nSYDt=@3c z0EHaj;C788f+RP9ClTr$bQXDX;h#`)dhgq0Iv!8_f#{RFOzM4p|YL^2E2{;zFg$rKf5R|@73qF_bZ*WCGps+eCxN* zmn*P{vc%WJ6aS&`xvI?5S|2(+Im%Wby#oO5>Tf;0vibVbTnX)8H#r3hIr1h*xPM7kJt%;1xoj1hSg_j-Q1n7Mfs~D99?T*?a_{WIS82B zufxB1goS`zZH&G*^*AwQ6kn1zylz>xW$`&6NubFBF4I@uh!jl(c&1*0sb$*`$3x|7 zDb|lT7BODG;DNVuC_vy5DEe&>wnumgjuB`?2qQcpX|-k@)&|b%RJg(+ST93le+8?= zVkYv5oHYiJd)k;O2{B{K!#8X=?GCx`qp_dV9Y;O`a5B0IjRpZ+zuA0={%mN5*T#%$ z0E7$?X71i9$zaJxP{7IYb0E~dx%VWz+1es?#SGM!Ne4-Otzqvg$0EF47JA9gkyI*X z?zYU-B0jfS`qX23gNkt>rqm6f5Y#%`V;kwuA{HY>GhwICty;VKF^{Zv)(?LCh!scW z5WTC8Z*o9-9cO)eeT@bGHGyX?O{Ra+@o4?=#wWR54}KF^-N0{n-PXDd^f8QKmIHja zql|YAe1I3E#K)~c@b`ul*CNF)cPg=@JeYMe6r7bEe&|ZO#(KoKnaj_ z`zl&h_%`lH3{T}cNAM*<$R0zK3)xbrme`5CGnTW9rib@U1Se;!5(wzf-mz`Np|t+M zmGmvA(cWV<3lL&PSijdqSqkr*v9j~uW8tx!n$!JDWPlF_u&rSCm(Sn0i}z0gU&04F zgyWCmF+HzS)aSx>ZPlLoePEekW2;Lx=Ldb8qRBYdZpW})KWF|C2P9WnHmo+16xAy{ zRA7H#S!0)@Gol`y`4P>XXZdAkR(LuFSNU)At-FjpWji7HENVsrxIehB2^aU#pN@eM1aKE8tl>R*Sz zv>t&M6B`ztjEGw?-7hkQWs#?ZTRXcdGI?v3Y);pwk;|MG)SgC+$07%J{ 
zF|3JM7D{4PwrB#gopkB{UbsY`D1OU4nB)<&MxorVzq#|uS~gs0bU1>aMpn{XNWZ1QB7iA5@>ni!pw((7r1zZSOn%y6a{P~ zPJK5ruc2R!R?Q@O^-)|K`wKsC`woLBmvVPxrnkzY|{&)VP* zz?BfDf-RYb_5=vi%^-Om7_qcLd1ex(N4nJ@E?{2AV$G@km6%QE5VUy6wg{wPlVxFG zDRPnO`c^4Uqj~4DLwHo7@ ztxXQauxos|j6iSM%t$`fNhnJ?o5ZF{YtEB`D_xQ-s(OR73UV&jpl?L4lT7cUac0a^e zN=YpIg@C4u*G=Y(w9Q2;r^!InNA|57{!o!^ZSv)P{K_4hJyQxEVS55V==ZZL8d0$} zH=cq@bC#kj1JuH)4sz@i2hOH5_7-VXiTM}wPPGJ1Ww^hOL!Hk25jmk0cYoZ%KU2ZK z)pAo&eI-#CutFmPrLzc(G}Blg$jITH(!r#dHZyg@JG1T2_4uC_WC?X zu7E-+#}3&qaAA##Nhrte&dq0B?DO-sDM4d$dH8fK_lrh6Shh5(WxA9)=RsIb#mk>p z)SeCRbV&!&T))XSNGSK3=5p00 zw!(4ccSCWt^=QU^3i&yH?h)e2FQpXNKsV`{xjB=|5?ji)>~ulW;{sOv7Jxn1-IjOE zxaRQA2gyEk`n~!K^gK1-yR_!5pVNWjHWlw2FeR5uPH^R54fwK&tGUOg=Sb<19&${F zqEx80a#*EqK=0i7OW!(j7)i-R?)SfpsKCV33C=$r0qS8=lWY7kRVZSXsjxKAXXIVH zf4)`wN(G~gSmnUtBG;cB+j8m3^*hBGy9Px7_rWQ2xqM;c;FHCBVL;ab3O&#mSmk>^ z;{iyM=mu}}m%s9~|BuU@{KX%Lc-u=6#%%ls?{C}Nci~%!+(1x>F(-J1)#gkG0;*QJ zeCwss>3_Y8M;3YVz4`WS2SR1+v#(m#UIjMr4Tty>+VbRO5HqBpMph;#xsC)4SH4rL2 z0Rb#;w1NylMng~qqx2?3ME97T#Nu6zg*J1cqTD};?+&TxCDa<`fr9&WQ~H-*yj@k0 z#${0ar7qCTc$rVEx*qWq|=*Tivmgc2qkx1AWmygh;D98tmRF=S6H#P z`J%8Jln26$PaRpg%ya;+)lZCx;13iykui9W+^%-n!gk{1IKK&$`iljUgDXr~RhI0n zCQ>*F2Ts2m$>nRmrgYTW2;WP=S36N_8+(~JX&Tu7TjgrU%ZI+Nqakbcm;sm@M;=yc z)m2)iB1zHKCWCPK*5&iD>o=nR+vXeqE zMXb?Y7fu%RxDLB%CPlX@CUL4DNSH?2u+5be2zj9!0{jKo+#)0)6PgAbvq;0pga@9Q z>6{)l*y$o*ZPH#QAsT>bGrIuqWN$(M7KS*KvGAQ&+$rTQyB9Yitck9J1{1$Lw#bb!6vmI?7v zl++<=QTU=+Sk0>C1BE&y(xl3za&UK$1FzY^n|)Hn8^(_p_4~_Dl~Z;k7qj3H zy-a)s6|Ndh^J-1JYEAQiFTD?{aW?@p+Xe#b5&~A75wmVGNmv+yg1%vfE7j9-R!S}N zP$JYJMkLgnta$$KIBE-r6h3?j)beC>e^uiq4kjL0P27ah6-AQ~o zAdIym;<3%@hYD*2hZ(yD`t)6bpX83R)N zipGmy#s>C?0Rcv&uKOgMcDr?LL0DA?XLSwQZ2#Qy6};*<_*~LmF$?yaG@kwI6S-ba zW2x|Bc)=ZYij`uw(T#Gi*6aIjy(d4#D)p9_on&ww>NK%(I&DX;g56qgfATZt$wK|? 
zP2D~9>wKnriWSPS(;o?v+GGQ3QO3Z^Bm|UiEEsDoW28D_^{RRNy|>kSuffTXB?7U&WY{UaaotLP0rl+xKpDJA-}`F zu>x0}R1UJm+0bF`0?kx|y6t(_^ISlqs`+oy*3eDC!~*+qvoAMJg1^mLT+3LaE)(my zfhBlb+l77a0ShP$Bq(INGl6EJc63s?$VO)exFoq#NHjXMtf*oJ^=k0q=-i`6( zS%CuNgX^Ae$-%odQ_s;&2V#o}?0P0lY0b}uA_nY=DX}kgHd#TmtIYJw2dZyDTJ3xk zZ{~|4y(E?fr%ip+`}8+Ru3}s5;ZrTVW*Z3wU+A=)siYK+9_?W3Fb#Rho;{er-qT||gdcgMWy2}s%qiIhV`{W}=Sq^X>IN?ERP@ic;5%Cg=k2+1O4tUmdls|30hPKx5s>B{ z#(-SD%K^3s&~vry{j0txogFg?-B1qtht55Re%%xPwVT{u`KtpXH68uy(#`H}V9p|L z01+tSbpJk@!4BPP^eEju8-;?Z$T6!6d_C40|M|_!gFO@e`ofYaJLhdH_EYlS=TWny zIQ6DpgI-5+0)D#)-#WuxAbV#K9t#9i*4bU2EKp_G*^8stau?2eTbYi0g*N7)Z*#7c(1< zBc9Wj9vr>)lTS-?@z-)lT)ZE51nvjK$BI2$^^?TQAQ8%7qs8-}WRh`mpK0o%wpq&Y zk1XuVz;EK>sxp@b_|I?*VpbrXWIOUY&LJ1Ty96^NKxNIWbz1#&*L^ zFIrg*QCGLi-l41{N>975_h=`y%lO!E?d0Rc@}6ECnRPOyDK6l2^$L4-$(`jTYv1Q0 zlplJdjKzd%^U0)+<;5Xzp8AO4ZlNXPs7Yy-%eA_`#@?wolr9a*?rGiRFQQQkLo!7T|k$ z&1UaZZmr*`4UjBaU+UPlZY~JM#8qm{MuslnAk8euIo=JzvA}ScRyGDe6 zFiv`+<^3$WH-MfOk>@*Lc$&U!S8D3@+x;3=cm~%BP9APxU1EpupO;84&R-2;`JEPe z@?ixd<~IMMIuOih2Qsun8)??R>ljtKYnM-EX%^t4g#cHF2?{P)^1Ox)NQ|DGt)~NH z{MWW+(_#9B@KD%^hv*Vbb~3IJ?tz zoOH0;h+n2{1nD~C^WwKvR6SWM(NNpAUK19at4ZYTwe6=83J{vt-%4?@9+`jui%45W z^+%pFxMuQrs8ioknysRV3sJG36hWp>q?Au3_)x|Ex!13Z8NKaiI81S$rDEmND%;+a z-7`uet@j}xmdK2?WWuuu6^Hz>e&-4`b^e#Q*m9d1EqyR` zYGVx>TD(Fd(Z$P$R>|;v%hfF-dhb&)z&A1rkH{*`8bE-0=6+}`LBvb?_CMF|-{LJX zmyna`Ek7wuzH805s2u~0au##|#P!=~#JQBX|7Up7H65f=zl z_&u{6tNLQak!^#uqz;@#T~U~A;JO(}=Iew`=DvFN{^YFdnEf`BZ*eM%22QCn2l;;(OY znwo{sq8($Oslx#rXQ5LcOOtt zujXUHR360*p=#cM027n(dkJq-TEc>?1Vao9mUaPf9oKoZWb<2!=;J5Qw4_-LAVw3sOF#!&8 zAp$ZXCN4v93_otTo6sk-&GE995G`|SxSUpU?UvNQDs{Mnr|j?*->+(xgD zYOqK>43Th@$b><-^kbY1sblxk7J7EIYDvV2<7bJcAD=}lSS7W~^djEPXG6(<(+S#aLJq(I;JOjjY#g8C@TkEw4e-Z|_iwN5G<8nt4jNcgrIkKJpDI9)# zQrUm85m%sMu6BQSN^?VDQAs${x9FgToi@97Giz)wDzP{N-Q8sJ&X?S+s06TFJG+#h z36kkx8sv;Rc#l$1PVv4`hxL1U6?Y9UxE48z7#}!l%vqwgkwk2O$?As|{F@f@PQ|*XG<#qnYVse}zMA{976Z=1U`R@oMaCSkv` z={8;|HRrCf;x7c=z)^O%>UVf7qhlNH-}BW@s)n+m*B3uRhuOTkJ$X(a%z)6Ib@O)| 
zI$pJ!B>p#|6m3v*<@%7%99RMU>Q$B3Nh2T-Jz8!;h)B47p zO8a!sFjVZ6n!`Ba)si>rg>e+xs!@Al#ILt#zrbVPl4EICCV{X`uzL8GYzCH^FFh7C zODT=eId***W6c290fC*qlq%9?>6*0OUBs7@Q?jYdzi=}d&npp2w$ww1@;Nd@u z4WGzw3p8`y9-Q z%r@@ayEfmIo8GALPMC!SIDsL>D;S?qu?MAxFSHY*g$LyS22~n+unhg*wF(tFP zttqWwJ?qODdbJ{NQ{)%Xvf?5#hDXnJDCL$766N<;fN~BnL7=p6@cwSl^rFae+)9id zp|KSbP}KT$(;E@szv~XirH}q3NKrGK|9Gz<5osK42&P7y3Hcs)1a#n0xd?1l?kHE+ z|5PnI$lT9PUsZl_LH)U|%lFOR>)-A1kT|Gk?DimiisdS4f4}rIA~$WKbqS8{tbPV; z;jWAp!WLb;gV)~e>bLH(z|3GiP`XxMmyt?dv!=Y#h{xNV`ic%V6rvrh^fSi23v3e( zLNx5M{-GgIv@hvhwhKB0)~3awBRY=ah9~brgKGzZnZ7sS!WEUTzGR-gc_biNg87Cq zZA0-C<*RsG^xkBN#xTh*k={NktfUds2j^SE+&k9+ zBLMOv!k)VgVVK17Vg#t;9aFJ|gVD@Hb|e&V20%wzlOsoQj3XZ?n5l#;jL0S^e<%-N zqB65ndGkw7&=xo1Mxt&ys7>-txDG#V&wcJWcrT;*l{f2>_V3h?l(4wnG|u3+z?|1a zTbBHXA|vU|0eMbnyARD*=Wp*3DdP;Uzq0+^r24abLw(VG{#K2->h_=lRmz%ae2L{P zp6e$hmzG3h3<_(FM+|Y?DQsNa&66`C%Z)*hH3jv6 z1FlZ+dI)q1!E_j+Vf|gJV9CYwo!dphvrex@#zW38-LAJZD{)Aiqz!TnOB-D9da0IMV3;Q`bPNFLVY4h#*$G>45ikul51x@f z8Ag8@RoYM?z>mUJ7PE4zUFk~b=*f;hs|Wdem@ogfcCljH%~6|wD7h&2Sc*ke&Owsp z%#Ika5s(aN(b}-wHFa5pHazg2Mvf}8klzMfmNw0HRxvqq8%D!NGrjB4DAHJjn2?Xp zc4IrKSF!FRK^zL2Ggh9DGPP2lSRQjZn(m+LkA#bb7M30Xml`EwCs%-Dht4tTgrXAm zOaFQPWvqQ2rvz9Q4EUBO3yxu9`To%cz5pW0J-%teuvrcteN*J~%dfI_PBmiK{ERX5 zcvGs{*(-<5kK zVmR&;iiM_a+A!I&iuXu}=r%HD7;D0!v;_UIwN07V0L589xcIV@T3JL$AmybqbM&lQ zb~k5@;R0?=7!``10!%aBw~!~!?ykUju%0DEls&uLqB7?~gbokd6Sd6C*ZfL`l^Wr| zztIENDU3p!0ZN6%v91Gui6xj)P?|`{gHetE|05xQROI`m1g;MXt#nFB4D?M!9M z75>}{ON>3n+k1B1P=B@@t{{np-K)`|e+i8e3!-@kp1Om~Z9f zTRn%{wCDDDcf7$Hl-majsU72n!+wG4ix$Q2 z@p$+9O5DmVz`A2(2iCi90#!ZVWMDf1`*o&H1>}x?kX%1vaHL;{+0hsp+s~($8!YV) z!6Fa9g!_eS!D@*8N$t5?%jn31yeq+?fUWt6y$7!-0x*>?7vFoKS%Y0!Qh5|MglUrn z$~BJ<#kYg9qb*R_eBrNerP04Wk}6u2qayp3_M!r|X>=2x|7T%=45jkUdAL`%4pOk+mBfRWrA-=Z z*aXc02T^wucMOP9yL`?87@zsBIs$amqCg@3T=Di%g@nw2Pj+b%{;ghPC_1bco z;IGi`p0_A`TC>o&#jVr2gPHVkcfE5!0?CQzvF27jGZKBH$hllVME!dHo!Th`N1(8E z=_f(XsD`?$v?aSflWe6x{&tyXJCy3P@{bmRrbKd`^-eM`xW+__ z=hv!5Jh=DGvT{E4Me_M)LRG1+eq`B`(jRsJinaNQ|8t$9S=W7{bV2?Mx6k;KVlFN9 
zG)_v8IEbH9s>dbWqD>^WVtQ(}I6{R1Pkhp4wU+^jvrLj2gZVSDVdi&!HnGUqAD#w% zn-A%vPPDSBgR_o|MBa-4UBB?DH^~SkKkDiUSgj{7CYJzL@Kn}oX81SzoZt0j3T5*t zqfBM^jqt3AyV|Ua_5d*$`4Zd@vYv7T|Jo%|L_)vV3eXB|pk9N?$CbN2Plg|1B z#%29LQsy=N7+pi^cb)ip{K(XFms!&em)__Npk?#8<9@sM&!SAwPC-A+>znr~zq8d| zIO0~+1Mku@U1*tNhCCoQm&WANHhGgbE*=i){g4F4TQV`Lb9IsT{s+)4|90UZ&)=_TZ(BO%e~H_dcV=cf0p62~*uU}Y<{xZYtz z5846F&}7ovQg}kxvF^|JEEq)$ z1Qk@w{}owFM{?>VXo!PiuM`1>m&n^s`q7sEpMGBWd%j`eQJ(axp{%o~% , - description: - "Reliable, Scalable, and Cost-Effective for LLMs from Novita AI", - requiredConfig: ["NovitaLLMApiKey"], - }, { name: "Together AI", value: "togetherai", @@ -303,6 +296,31 @@ export const AVAILABLE_LLM_PROVIDERS = [ description: "Run Moonshot AI's powerful LLMs.", requiredConfig: ["MoonshotAiApiKey"], }, + { + name: "Novita AI", + value: "novita", + logo: NovitaLogo, + options: (settings) => , + description: + "Reliable, Scalable, and Cost-Effective for LLMs from Novita AI", + requiredConfig: ["NovitaLLMApiKey"], + }, + { + name: "CometAPI", + value: "cometapi", + logo: CometApiLogo, + options: (settings) => , + description: "500+ AI Models all in one API.", + requiredConfig: ["CometApiLLMApiKey"], + }, + { + name: "xAI", + value: "xai", + logo: XAILogo, + options: (settings) => , + description: "Run xAI's powerful LLMs like Grok-2 and more.", + requiredConfig: ["XAIApiKey", "XAIModelPref"], + }, { name: "Generic OpenAI", value: "generic-openai", @@ -317,14 +335,6 @@ export const AVAILABLE_LLM_PROVIDERS = [ "GenericOpenAiKey", ], }, - { - name: "xAI", - value: "xai", - logo: XAILogo, - options: (settings) => , - description: "Run xAI's powerful LLMs like Grok-2 and more.", - requiredConfig: ["XAIApiKey", "XAIModelPref"], - }, ]; export default function GeneralLLMPreference() { diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx index a6e4ab02..442a443d 100644 --- 
a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx +++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx @@ -39,6 +39,7 @@ import PPIOLogo from "@/media/llmprovider/ppio.png"; import PGVectorLogo from "@/media/vectordbs/pgvector.png"; import DPAISLogo from "@/media/llmprovider/dpais.png"; import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png"; +import CometApiLogo from "@/media/llmprovider/cometapi.png"; import React, { useState, useEffect } from "react"; import paths from "@/utils/paths"; @@ -252,6 +253,14 @@ export const LLM_SELECTION_PRIVACY = { ], logo: MoonshotAiLogo, }, + cometapi: { + name: "CometAPI", + description: [ + "Your chats will not be used for training", + "Your prompts and document text used in response creation are visible to CometAPI", + ], + logo: CometApiLogo, + }, }; export const VECTOR_DB_PRIVACY = { diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx index 4ce2745d..7a16985f 100644 --- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx +++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx @@ -28,6 +28,7 @@ import CohereLogo from "@/media/llmprovider/cohere.png"; import PPIOLogo from "@/media/llmprovider/ppio.png"; import DellProAiStudioLogo from "@/media/llmprovider/dpais.png"; import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png"; +import CometApiLogo from "@/media/llmprovider/cometapi.png"; import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions"; import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions"; @@ -57,6 +58,7 @@ import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions"; import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions"; import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions"; import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions"; +import 
CometApiLLMOptions from "@/components/LLMSelection/CometApiLLMOptions"; import LLMItem from "@/components/LLMSelection/LLMItem"; import System from "@/models/system"; @@ -272,6 +274,13 @@ const LLMS = [ options: (settings) => , description: "Run Moonshot AI's powerful LLMs.", }, + { + name: "CometAPI", + value: "cometapi", + logo: CometApiLogo, + options: (settings) => , + description: "500+ AI Models all in one API.", + }, ]; export default function LLMPreference({ diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx index 31b7327b..9710243d 100644 --- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx +++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx @@ -32,20 +32,16 @@ const ENABLED_PROVIDERS = [ "nvidia-nim", "gemini", "moonshotai", + "cometapi", // TODO: More agent support. // "cohere", // Has tool calling and will need to build explicit support // "huggingface" // Can be done but already has issues with no-chat templated. Needs to be tested. 
]; const WARN_PERFORMANCE = [ "lmstudio", - "groq", - "azure", "koboldcpp", "ollama", "localai", - "openrouter", - "novita", - "generic-openai", "textgenwebui", ]; diff --git a/locales/README.ja-JP.md b/locales/README.ja-JP.md index 8920b218..d6fef0fa 100644 --- a/locales/README.ja-JP.md +++ b/locales/README.ja-JP.md @@ -91,6 +91,7 @@ AnythingLLMは、ドキュメントを`ワークスペース`と呼ばれるオ - [Cohere](https://cohere.com/) - [KoboldCPP](https://github.com/LostRuins/koboldcpp) - [PPIO](https://ppinfra.com?utm_source=github_anything-llm) +- [CometAPI (チャットモデル)](https://api.cometapi.com/) **埋め込みモデル:** diff --git a/locales/README.zh-CN.md b/locales/README.zh-CN.md index e3c63225..aa328351 100644 --- a/locales/README.zh-CN.md +++ b/locales/README.zh-CN.md @@ -100,6 +100,7 @@ AnythingLLM将您的文档划分为称为`workspaces` (工作区)的对象。工 - [xAI](https://x.ai/) - [Novita AI (聊天模型)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link) - [PPIO (聊天模型)](https://ppinfra.com?utm_source=github_anything-llm) +- [CometAPI (聊天模型)](https://api.cometapi.com/) **支持的嵌入模型:** @@ -200,7 +201,7 @@ _以下是一些与 AnythingLLM 兼容的应用程序,但并非由 Mintplex La ### 怎样关闭 -在服务器或 Docker 的 .env 设置中将 `DISABLE_TELEMETRY` 设置为 "true",即可选择不参与遥测数据收集。你也可以在应用内通过以下路径操作:侧边栏 > `Privacy` (隐私) > 关闭遥测功能。 +在服务器或 Docker 的 .env 设置中将 `DISABLE_TELEMETRY` 设置为 "true",即可选择不参与遥测数据收集。你也可以在应用内通过以下路径操作:侧边栏 > `Privacy` (隐私) > 关闭遥测功能。 ### 你们跟踪收集哪些信息? 
@@ -214,7 +215,7 @@ _以下是一些与 AnythingLLM 兼容的应用程序,但并非由 Mintplex La 您可以通过查找所有调用`Telemetry.sendTelemetry`的位置来验证这些声明。此外,如果启用,这些事件也会被写入输出日志,因此您也可以看到发送了哪些具体数据。**IP或其他识别信息不会被收集**。Telemetry远程信息收集的方案来自[PostHog](https://posthog.com/) - 一个开源的远程信息收集服务。 -我们非常重视隐私,且不用烦人的弹窗问卷来获取反馈,希望你能理解为什么我们想要知道该工具的使用情况,这样我们才能打造真正值得使用的产品。所有匿名数据 _绝不会_ 与任何第三方共享。 +我们非常重视隐私,且不用烦人的弹窗问卷来获取反馈,希望你能理解为什么我们想要知道该工具的使用情况,这样我们才能打造真正值得使用的产品。所有匿名数据 _绝不会_ 与任何第三方共享。 [在源代码中查看所有信息收集活动](https://github.com/search?q=repo%3AMintplex-Labs%2Fanything-llm%20.sendTelemetry\(&type=code) diff --git a/server/.env.example b/server/.env.example index 0d3d1ecd..4e5d3091 100644 --- a/server/.env.example +++ b/server/.env.example @@ -106,6 +106,12 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long. # COHERE_API_KEY= # COHERE_MODEL_PREF='command-r' +# LLM_PROVIDER='cometapi' +# COMETAPI_LLM_API_KEY='your-cometapi-key-here' # Get one at https://api.cometapi.com/console/token +# COMETAPI_LLM_MODEL_PREF='gpt-5-mini' +# COMETAPI_LLM_TIMEOUT_MS=500 # Optional; stream idle timeout in ms (min 500ms) + + # LLM_PROVIDER='bedrock' # AWS_BEDROCK_LLM_ACCESS_KEY_ID= # AWS_BEDROCK_LLM_ACCESS_KEY= @@ -354,4 +360,4 @@ TTS_PROVIDER="native" # Specify the target languages for when using OCR to parse images and PDFs. # This is a comma separated list of language codes as a string. Unsupported languages will be ignored. # Default is English. See https://tesseract-ocr.github.io/tessdoc/Data-Files-in-different-versions.html for a list of valid language codes. 
-# TARGET_OCR_LANG=eng,deu,ita,spa,fra,por,rus,nld,tur,hun,pol,ita,spa,fra,por,rus,nld,tur,hun,pol \ No newline at end of file +# TARGET_OCR_LANG=eng,deu,ita,spa,fra,por,rus,nld,tur,hun,pol,ita,spa,fra,por,rus,nld,tur,hun,pol diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js index f0796be0..064e299c 100644 --- a/server/models/systemSettings.js +++ b/server/models/systemSettings.js @@ -610,6 +610,11 @@ const SystemSettings = { DellProAiStudioModelPref: process.env.DPAIS_LLM_MODEL_PREF, DellProAiStudioTokenLimit: process.env.DPAIS_LLM_MODEL_TOKEN_LIMIT ?? 4096, + + // CometAPI LLM Keys + CometApiLLMApiKey: !!process.env.COMETAPI_LLM_API_KEY, + CometApiLLMModelPref: process.env.COMETAPI_LLM_MODEL_PREF, + CometApiLLMTimeout: process.env.COMETAPI_LLM_TIMEOUT_MS, }; }, diff --git a/server/storage/models/.gitignore b/server/storage/models/.gitignore index e73faa05..7f5c5f8b 100644 --- a/server/storage/models/.gitignore +++ b/server/storage/models/.gitignore @@ -10,4 +10,5 @@ togetherAi tesseract ppio context-windows/* -MintplexLabs \ No newline at end of file +MintplexLabs +cometapi \ No newline at end of file diff --git a/server/utils/AiProviders/cometapi/constants.js b/server/utils/AiProviders/cometapi/constants.js new file mode 100644 index 00000000..2d7a32da --- /dev/null +++ b/server/utils/AiProviders/cometapi/constants.js @@ -0,0 +1,39 @@ +// TODO: When CometAPI's model list is upgraded, this operation needs to be removed +// Model filtering patterns from cometapi.md that are not supported by AnythingLLM +module.exports.COMETAPI_IGNORE_PATTERNS = [ + // Image generation models + "dall-e", + "dalle", + "midjourney", + "mj_", + "stable-diffusion", + "sd-", + "flux-", + "playground-v", + "ideogram", + "recraft-", + "black-forest-labs", + "/recraft-v3", + "recraftv3", + "stability-ai/", + "sdxl", + // Audio generation models + "suno_", + "tts", + "whisper", + // Video generation models + "runway", + "luma_", + "luma-", + "veo", + "kling_", + 
"minimax_video", + "hunyuan-t1", + // Utility models + "embedding", + "search-gpts", + "files_retrieve", + "moderation", + // Deepl + "deepl", +]; diff --git a/server/utils/AiProviders/cometapi/index.js b/server/utils/AiProviders/cometapi/index.js new file mode 100644 index 00000000..82fb7c1b --- /dev/null +++ b/server/utils/AiProviders/cometapi/index.js @@ -0,0 +1,433 @@ +const { NativeEmbedder } = require("../../EmbeddingEngines/native"); +const { v4: uuidv4 } = require("uuid"); +const { + writeResponseChunk, + clientAbortedHandler, + formatChatHistory, +} = require("../../helpers/chat/responses"); +const fs = require("fs"); +const path = require("path"); +const { safeJsonParse } = require("../../http"); +const { + LLMPerformanceMonitor, +} = require("../../helpers/chat/LLMPerformanceMonitor"); +const { COMETAPI_IGNORE_PATTERNS } = require("./constants"); +const cacheFolder = path.resolve( + process.env.STORAGE_DIR + ? path.resolve(process.env.STORAGE_DIR, "models", "cometapi") + : path.resolve(__dirname, `../../../storage/models/cometapi`) +); + +class CometApiLLM { + constructor(embedder = null, modelPreference = null) { + if (!process.env.COMETAPI_LLM_API_KEY) + throw new Error("No CometAPI API key was set."); + + const { OpenAI: OpenAIApi } = require("openai"); + this.basePath = "https://api.cometapi.com/v1"; + this.openai = new OpenAIApi({ + baseURL: this.basePath, + apiKey: process.env.COMETAPI_LLM_API_KEY ?? null, + defaultHeaders: { + "HTTP-Referer": "https://anythingllm.com", + "X-CometAPI-Source": "anythingllm", + }, + }); + this.model = + modelPreference || process.env.COMETAPI_LLM_MODEL_PREF || "gpt-5-mini"; + this.limits = { + history: this.promptWindowLimit() * 0.15, + system: this.promptWindowLimit() * 0.15, + user: this.promptWindowLimit() * 0.7, + }; + + this.embedder = embedder ?? 
new NativeEmbedder(); + this.defaultTemp = 0.7; + this.timeout = this.#parseTimeout(); + + if (!fs.existsSync(cacheFolder)) + fs.mkdirSync(cacheFolder, { recursive: true }); + this.cacheModelPath = path.resolve(cacheFolder, "models.json"); + this.cacheAtPath = path.resolve(cacheFolder, ".cached_at"); + + this.log(`Loaded with model: ${this.model}`); + } + + log(text, ...args) { + console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args); + } + + /** + * CometAPI has various models that never return `finish_reasons` and thus leave the stream open + * which causes issues in subsequent messages. This timeout value forces us to close the stream after + * x milliseconds. This is a configurable value via the COMETAPI_LLM_TIMEOUT_MS value + * @returns {number} The timeout value in milliseconds (default: 500) + */ + #parseTimeout() { + if (isNaN(Number(process.env.COMETAPI_LLM_TIMEOUT_MS))) return 500; + const setValue = Number(process.env.COMETAPI_LLM_TIMEOUT_MS); + if (setValue < 500) return 500; + return setValue; + } + + // This checks if the .cached_at file has a timestamp that is more than 1Week (in millis) + // from the current date. If it is, then we will refetch the API so that all the models are up + // to date. + #cacheIsStale() { + const MAX_STALE = 6.048e8; // 1 Week in MS + if (!fs.existsSync(this.cacheAtPath)) return true; + const now = Number(new Date()); + const timestampMs = Number(fs.readFileSync(this.cacheAtPath)); + return now - timestampMs > MAX_STALE; + } + + // The CometAPI model API has a lot of models, so we cache this locally in the directory + // as if the cache directory JSON file is stale or does not exist we will fetch from API and store it. + // This might slow down the first request, but we need the proper token context window + // for each model and this is a constructor property - so we can really only get it if this cache exists. 
+ // We used to have this as a chore, but given there is an API to get the info - this makes little sense. + async #syncModels() { + if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale()) + return false; + + this.log( + "Model cache is not present or stale. Fetching from CometAPI API." + ); + await fetchCometApiModels(); + return; + } + + #appendContext(contextTexts = []) { + if (!contextTexts || !contextTexts.length) return ""; + return ( + "\nContext:\n" + + contextTexts + .map((text, i) => { + return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`; + }) + .join("") + ); + } + + models() { + if (!fs.existsSync(this.cacheModelPath)) return {}; + return safeJsonParse( + fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }), + {} + ); + } + + streamingEnabled() { + return "streamGetChatCompletion" in this; + } + + static promptWindowLimit(modelName) { + const cacheModelPath = path.resolve(cacheFolder, "models.json"); + const availableModels = fs.existsSync(cacheModelPath) + ? safeJsonParse( + fs.readFileSync(cacheModelPath, { encoding: "utf-8" }), + {} + ) + : {}; + return availableModels[modelName]?.maxLength || 4096; + } + + promptWindowLimit() { + const availableModels = this.models(); + return availableModels[this.model]?.maxLength || 4096; + } + + async isValidChatCompletionModel(model = "") { + await this.#syncModels(); + const availableModels = this.models(); + return availableModels.hasOwnProperty(model); + } + + /** + * Generates appropriate content array for a message + attachments. 
+ * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}} + * @returns {string|object[]} + */ + #generateContent({ userPrompt, attachments = [] }) { + if (!attachments.length) { + return userPrompt; + } + + const content = [{ type: "text", text: userPrompt }]; + for (let attachment of attachments) { + content.push({ + type: "image_url", + image_url: { + url: attachment.contentString, + detail: "auto", + }, + }); + } + return content.flat(); + } + + constructPrompt({ + systemPrompt = "", + contextTexts = [], + chatHistory = [], + userPrompt = "", + attachments = [], + }) { + const prompt = { + role: "system", + content: `${systemPrompt}${this.#appendContext(contextTexts)}`, + }; + return [ + prompt, + ...formatChatHistory(chatHistory, this.#generateContent), + { + role: "user", + content: this.#generateContent({ userPrompt, attachments }), + }, + ]; + } + + async getChatCompletion(messages = null, { temperature = 0.7 }) { + if (!(await this.isValidChatCompletionModel(this.model))) + throw new Error( + `CometAPI chat: ${this.model} is not valid for chat completion!` + ); + + const result = await LLMPerformanceMonitor.measureAsyncFunction( + this.openai.chat.completions + .create({ + model: this.model, + messages, + temperature, + }) + .catch((e) => { + throw new Error(e.message); + }) + ); + + if ( + !result.output.hasOwnProperty("choices") || + result.output.choices.length === 0 + ) + return null; + + return { + textResponse: result.output.choices[0].message.content, + metrics: { + prompt_tokens: result.output.usage.prompt_tokens || 0, + completion_tokens: result.output.usage.completion_tokens || 0, + total_tokens: result.output.usage.total_tokens || 0, + outputTps: result.output.usage.completion_tokens / result.duration, + duration: result.duration, + }, + }; + } + + async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { + if (!(await this.isValidChatCompletionModel(this.model))) + throw new Error( + `CometAPI chat: 
${this.model} is not valid for chat completion!` + ); + + const measuredStreamRequest = await LLMPerformanceMonitor.measureStream( + this.openai.chat.completions.create({ + model: this.model, + stream: true, + messages, + temperature, + }), + messages + ); + return measuredStreamRequest; + } + + /** + * Handles the default stream response for a chat. + * @param {import("express").Response} response + * @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream + * @param {Object} responseProps + * @returns {Promise} + */ + handleStream(response, stream, responseProps) { + const timeoutThresholdMs = this.timeout; + const { uuid = uuidv4(), sources = [] } = responseProps; + + return new Promise(async (resolve) => { + let fullText = ""; + let lastChunkTime = null; // null when first token is still not received. + + // Establish listener to early-abort a streaming response + // in case things go sideways or the user does not like the response. + // We preserve the generated text but continue as if chat was completed + // to preserve previously generated content. + const handleAbort = () => { + stream?.endMeasurement({ + completion_tokens: LLMPerformanceMonitor.countTokens(fullText), + }); + clientAbortedHandler(resolve, fullText); + }; + response.on("close", handleAbort); + + // NOTICE: Not all CometAPI models will return a stop reason + // which keeps the connection open and so the model never finalizes the stream + // like the traditional OpenAI response schema does. So in the case the response stream + // never reaches a formal close state we maintain an interval timer that if we go >=timeoutThresholdMs with + // no new chunks then we kill the stream and assume it to be complete. CometAPI is quite fast + // so this threshold should permit most responses, but we can adjust `timeoutThresholdMs` if + // we find it is too aggressive. 
+ const timeoutCheck = setInterval(() => { + if (lastChunkTime === null) return; + + const now = Number(new Date()); + const diffMs = now - lastChunkTime; + if (diffMs >= timeoutThresholdMs) { + this.log( + `CometAPI stream did not self-close and has been stale for >${timeoutThresholdMs}ms. Closing response stream.` + ); + writeResponseChunk(response, { + uuid, + sources, + type: "textResponseChunk", + textResponse: "", + close: true, + error: false, + }); + clearInterval(timeoutCheck); + response.removeListener("close", handleAbort); + stream?.endMeasurement({ + completion_tokens: LLMPerformanceMonitor.countTokens(fullText), + }); + resolve(fullText); + } + }, 500); + + try { + for await (const chunk of stream) { + const message = chunk?.choices?.[0]; + const token = message?.delta?.content; + lastChunkTime = Number(new Date()); + + if (token) { + fullText += token; + writeResponseChunk(response, { + uuid, + sources: [], + type: "textResponseChunk", + textResponse: token, + close: false, + error: false, + }); + } + + if (message?.finish_reason !== null) { + writeResponseChunk(response, { + uuid, + sources, + type: "textResponseChunk", + textResponse: "", + close: true, + error: false, + }); + clearInterval(timeoutCheck); response.removeListener("close", handleAbort); + stream?.endMeasurement({ + completion_tokens: LLMPerformanceMonitor.countTokens(fullText), + }); + resolve(fullText); + } + } + } catch (e) { + writeResponseChunk(response, { + uuid, + sources, + type: "abort", + textResponse: null, + close: true, + error: e.message, + }); + clearInterval(timeoutCheck); response.removeListener("close", handleAbort); + stream?.endMeasurement({ + completion_tokens: LLMPerformanceMonitor.countTokens(fullText), + }); + resolve(fullText); + } + }); + } + + // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations + async embedTextInput(textInput) { + return await this.embedder.embedTextInput(textInput);
+ } + + async compressMessages(promptArgs = {}, rawHistory = []) { + const { messageArrayCompressor } = require("../../helpers/chat"); + const messageArray = this.constructPrompt(promptArgs); + return await messageArrayCompressor(this, messageArray, rawHistory); + } +} + +/** + * Fetches available models from CometAPI and filters out non-chat models + * Based on cometapi.md specifications + */ +async function fetchCometApiModels() { + return await fetch(`https://api.cometapi.com/v1/models`, { + method: "GET", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${process.env.COMETAPI_LLM_API_KEY}`, + }, + }) + .then((res) => res.json()) + .then(({ data = [] }) => { + const models = {}; + + // Filter out non-chat models using patterns from cometapi.md + const chatModels = data.filter((model) => { + const modelId = model.id.toLowerCase(); + return !COMETAPI_IGNORE_PATTERNS.some((pattern) => + modelId.includes(pattern.toLowerCase()) + ); + }); + + chatModels.forEach((model) => { + models[model.id] = { + id: model.id, + name: model.id, // CometAPI has limited model info according to cometapi.md + organization: + model.id.split("/")[0] || model.id.split("-")[0] || "CometAPI", + maxLength: model.context_length || 4096, // Conservative default + }; + }); + + // Cache all response information + if (!fs.existsSync(cacheFolder)) + fs.mkdirSync(cacheFolder, { recursive: true }); + fs.writeFileSync( + path.resolve(cacheFolder, "models.json"), + JSON.stringify(models), + { + encoding: "utf-8", + } + ); + fs.writeFileSync( + path.resolve(cacheFolder, ".cached_at"), + String(Number(new Date())), + { + encoding: "utf-8", + } + ); + return models; + }) + .catch((e) => { + console.error("Error fetching CometAPI models:", e); + return {}; + }); +} + +module.exports = { + CometApiLLM, + fetchCometApiModels, +}; diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js index d6b22d3a..683850df 100644 --- 
a/server/utils/agents/aibitat/index.js +++ b/server/utils/agents/aibitat/index.js @@ -830,6 +830,8 @@ ${this.getHistory({ to: route.to }) return new Providers.GeminiProvider({ model: config.model }); case "dpais": return new Providers.DellProAiStudioProvider({ model: config.model }); + case "cometapi": + return new Providers.CometApiProvider({ model: config.model }); default: throw new Error( `Unknown provider: ${config.provider}. Please use a valid provider.` diff --git a/server/utils/agents/aibitat/providers/ai-provider.js b/server/utils/agents/aibitat/providers/ai-provider.js index 07867e4c..c2528acd 100644 --- a/server/utils/agents/aibitat/providers/ai-provider.js +++ b/server/utils/agents/aibitat/providers/ai-provider.js @@ -251,6 +251,14 @@ class Provider { apiKey: null, ...config, }); + case "cometapi": + return new ChatOpenAI({ + configuration: { + baseURL: "https://api.cometapi.com/v1", + }, + apiKey: process.env.COMETAPI_LLM_API_KEY ?? null, + ...config, + }); default: throw new Error(`Unsupported provider ${provider} for this task.`); diff --git a/server/utils/agents/aibitat/providers/cometapi.js b/server/utils/agents/aibitat/providers/cometapi.js new file mode 100644 index 00000000..87eca7a0 --- /dev/null +++ b/server/utils/agents/aibitat/providers/cometapi.js @@ -0,0 +1,115 @@ +const OpenAI = require("openai"); +const Provider = require("./ai-provider.js"); +const InheritMultiple = require("./helpers/classes.js"); +const UnTooled = require("./helpers/untooled.js"); + +/** + * The agent provider for the CometAPI provider. 
+ */ +class CometApiProvider extends InheritMultiple([Provider, UnTooled]) { + model; + + constructor(config = {}) { + const { model = "gpt-5-mini" } = config; + super(); + const client = new OpenAI({ + baseURL: "https://api.cometapi.com/v1", + apiKey: process.env.COMETAPI_LLM_API_KEY, + maxRetries: 3, + defaultHeaders: { + "HTTP-Referer": "https://anythingllm.com", + "X-CometAPI-Source": "anythingllm", + }, + }); + + this._client = client; + this.model = model; + this.verbose = true; + } + + get client() { + return this._client; + } + + async #handleFunctionCallChat({ messages = [] }) { + return await this.client.chat.completions + .create({ + model: this.model, + temperature: 0, + messages, + }) + .then((result) => { + if (!result.hasOwnProperty("choices")) + throw new Error("CometAPI chat: No results!"); + if (result.choices.length === 0) + throw new Error("CometAPI chat: No results length!"); + return result.choices[0].message.content; + }) + .catch((_) => { + return null; + }); + } + + /** + * Create a completion based on the received messages. + * + * @param messages A list of messages to send to the API. + * @param functions + * @returns The completion. 
+ */ + async complete(messages, functions = []) { + let completion; + if (functions.length > 0) { + const { toolCall, text } = await this.functionCall( + messages, + functions, + this.#handleFunctionCallChat.bind(this) + ); + + if (toolCall !== null) { + this.providerLog(`Valid tool call found - running ${toolCall.name}.`); + this.deduplicator.trackRun(toolCall.name, toolCall.arguments); + return { + result: null, + functionCall: { + name: toolCall.name, + arguments: toolCall.arguments, + }, + cost: 0, + }; + } + completion = { content: text }; + } + + if (!completion?.content) { + this.providerLog("Will assume chat completion without tool call inputs."); + const response = await this.client.chat.completions.create({ + model: this.model, + messages: this.cleanMsgs(messages), + }); + completion = response.choices[0].message; + } + + // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent + // from calling the exact same function over and over in a loop within a single chat exchange + // _but_ we should enable it to call previously used tools in a new chat interaction. + this.deduplicator.reset("runs"); + return { + result: completion.content, + cost: 0, + }; + } + + /** + * Get the cost of the completion. + * + * @param _usage The completion to get the cost for. + * @returns The cost of the completion. + * Stubbed since CometAPI has no cost basis. 
+ */ + getCost() { + return 0; + } +} + +module.exports = CometApiProvider; diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js index 859ad9de..2146269b 100644 --- a/server/utils/agents/aibitat/providers/index.js +++ b/server/utils/agents/aibitat/providers/index.js @@ -24,6 +24,7 @@ const PPIOProvider = require("./ppio.js"); const GeminiProvider = require("./gemini.js"); const DellProAiStudioProvider = require("./dellProAiStudio.js"); const MoonshotAiProvider = require("./moonshotAi.js"); +const CometApiProvider = require("./cometapi.js"); module.exports = { OpenAIProvider, @@ -47,6 +48,7 @@ module.exports = { ApiPieProvider, XAIProvider, NovitaProvider, + CometApiProvider, NvidiaNimProvider, PPIOProvider, GeminiProvider, diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js index 4527ee78..46581d3c 100644 --- a/server/utils/agents/index.js +++ b/server/utils/agents/index.js @@ -204,6 +204,11 @@ class AgentHandler { throw new Error("Moonshot AI model must be set to use agents."); break; + case "cometapi": + if (!process.env.COMETAPI_LLM_API_KEY) + throw new Error("CometAPI API Key must be provided to use agents."); + break; + default: throw new Error( "No workspace agent provider set. Please set your agent provider in the workspace's settings" @@ -274,6 +279,8 @@ class AgentHandler { return process.env.GEMINI_LLM_MODEL_PREF ?? "gemini-2.0-flash-lite"; case "dpais": return process.env.DPAIS_LLM_MODEL_PREF; + case "cometapi": + return process.env.COMETAPI_LLM_MODEL_PREF ?? 
"gpt-5-mini"; default: return null; } diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js index e0a1fb82..ea5e738c 100644 --- a/server/utils/helpers/customModels.js +++ b/server/utils/helpers/customModels.js @@ -8,6 +8,7 @@ const { parseLMStudioBasePath } = require("../AiProviders/lmStudio"); const { parseNvidiaNimBasePath } = require("../AiProviders/nvidiaNim"); const { fetchPPIOModels } = require("../AiProviders/ppio"); const { GeminiLLM } = require("../AiProviders/gemini"); +const { fetchCometApiModels } = require("../AiProviders/cometapi"); const SUPPORT_CUSTOM_MODELS = [ "openai", @@ -28,6 +29,7 @@ const SUPPORT_CUSTOM_MODELS = [ "deepseek", "apipie", "novita", + "cometapi", "xai", "gemini", "ppio", @@ -76,6 +78,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) { return await getAPIPieModels(apiKey); case "novita": return await getNovitaModels(); + case "cometapi": + return await getCometApiModels(); case "xai": return await getXAIModels(apiKey); case "nvidia-nim": @@ -453,6 +457,20 @@ async function getNovitaModels() { return { models, error: null }; } +async function getCometApiModels() { + const knownModels = await fetchCometApiModels(); + if (Object.keys(knownModels).length === 0) + return { models: [], error: null }; + const models = Object.values(knownModels).map((model) => { + return { + id: model.id, + organization: model.organization, + name: model.name, + }; + }); + return { models, error: null }; +} + async function getAPIPieModels(apiKey = null) { const knownModels = await fetchApiPieModels(apiKey); if (!Object.keys(knownModels).length === 0) diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js index bff2873b..12327698 100644 --- a/server/utils/helpers/index.js +++ b/server/utils/helpers/index.js @@ -212,6 +212,9 @@ function getLLMProvider({ provider = null, model = null } = {}) { case "dpais": const { DellProAiStudioLLM } =
require("../AiProviders/dellProAiStudio"); return new DellProAiStudioLLM(embedder, model); + case "cometapi": + const { CometApiLLM } = require("../AiProviders/cometapi"); + return new CometApiLLM(embedder, model); default: throw new Error( `ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}` @@ -362,6 +365,9 @@ function getLLMProviderClass({ provider = null } = {}) { case "moonshotai": const { MoonshotAiLLM } = require("../AiProviders/moonshotAi"); return MoonshotAiLLM; + case "cometapi": + const { CometApiLLM } = require("../AiProviders/cometapi"); + return CometApiLLM; default: return null; } @@ -430,6 +436,8 @@ function getBaseLLMProviderModel({ provider = null } = {}) { return process.env.DPAIS_LLM_MODEL_PREF; case "moonshotai": return process.env.MOONSHOT_AI_MODEL_PREF; + case "cometapi": + return process.env.COMETAPI_LLM_MODEL_PREF; default: return null; } diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js index 124b4b4e..6dfbe4fc 100644 --- a/server/utils/helpers/updateENV.js +++ b/server/utils/helpers/updateENV.js @@ -704,6 +704,20 @@ const KEY_MAPPING = { envKey: "MOONSHOT_AI_MODEL_PREF", checks: [isNotEmpty], }, + + // CometAPI Options + CometApiLLMApiKey: { + envKey: "COMETAPI_LLM_API_KEY", + checks: [isNotEmpty], + }, + CometApiLLMModelPref: { + envKey: "COMETAPI_LLM_MODEL_PREF", + checks: [isNotEmpty], + }, + CometApiLLMTimeout: { + envKey: "COMETAPI_LLM_TIMEOUT_MS", + checks: [], + }, }; function isNotEmpty(input = "") { @@ -813,6 +827,7 @@ function supportedLLM(input = "") { "ppio", "dpais", "moonshotai", + "cometapi", ].includes(input); return validSelection ? null : `${input} is not a valid LLM provider.`; }