@cogniformai/instructor-stream
Version:
Streaming-first structured data extraction from LLMs with real-time updates
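The sources embedded in the source map below expose a createInstructor factory that wraps an OpenAI-compatible client and adds a response_model option backed by a Zod schema; with stream: true the call resolves to an async generator that yields progressively more complete partials plus usage metadata. The sketch below is inferred from those bundled sources and is not a verified usage of the published package: the model name and schema are placeholders, and it assumes the package's default export is the createInstructor factory shown in the bundled index source.

```typescript
// Minimal sketch, assuming the package re-exports createInstructor as its default.
import OpenAI from "openai"
import { z } from "zod"
import createInstructor from "@cogniformai/instructor-stream"

// Placeholder schema describing the structure we want extracted.
const UserSchema = z.object({
  name: z.string(),
  age: z.number(),
})

const client = createInstructor({
  client: new OpenAI({ apiKey: process.env.OPENAI_API_KEY }),
  // One of FUNCTIONS | TOOLS | JSON | MD_JSON | JSON_SCHEMA | THINKING_MD_JSON
  mode: "TOOLS",
})

async function main() {
  // With stream: true the create() call resolves to an async generator of
  // { data: Partial<User>[], _meta } updates as the response is parsed.
  const stream = await client.chat.completions.create({
    model: "gpt-4o-mini", // hypothetical model name, for illustration only
    messages: [{ role: "user", content: "Jason is 30 years old" }],
    stream: true,
    response_model: { schema: UserSchema, name: "User" },
  })

  for await (const { data, _meta } of stream) {
    console.log(data[0], _meta.usage)
  }
}

main()
```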
1 line • 52.3 kB
Source Map (JSON)
{"version":3,"sources":["/Users/m1mbp/tools/instructor-stream-js/packages/instructor-stream/dist/index.cjs","../src/instructor.ts","../src/lib/index.ts","../src/constants/index.ts","../src/stream/oai/params.ts","../src/stream/response-model.ts","../src/constants/providers.ts","../src/errors.ts","../src/index.ts"],"names":["omit","keys","obj","result","key","iterableTee","iterable","n","buffers","resolvers","iterator","done","reader","index","resolve","item","buffer","_","i","MODE","OAIBuildFunctionParams","definition","params","name","description","definitionParams","function_call","functions","OAIBuildToolFunctionParams","tool_choice","tools","tool","OAIBuildMessageBasedParams","OAIBuildThinkingMessageBasedParams","OAIBuildJsonModeParams","OAIBuildJsonSchemaParams","withResponseModel","schema","mode","safeName","jsonSchema","MODE_TO_RESPONSE_PARSER","thinkingJsonParser","PROVIDERS","PROVIDER_SUPPORTED_MODES","NON_OAI_PROVIDER_URLS","PROVIDER_PARAMS_TRANSFORMERS","PROVIDER_SUPPORTED_MODES_BY_MODEL","InstructorError","message","cause","UnsupportedClientError","RetryableError","NonRetryableError","ValidationError","issues","MAX_RETRIES_DEFAULT","Instructor","client","debug","logger","retryAllErrors","requestOptions","isGenericClient","OpenAI","isModeSupported","createInstructor"],"mappings":"AAAA,2lCAA2F,gFCAxE,6CACI,iEACK,0DACC,SCMbA,CAAAA,CAA0CC,CAAAA,CAAWC,CAAAA,CAAoB,CACvF,IAAMC,CAAAA,CAAS,CAAC,CAAA,CAChB,GAAA,CAAA,IAAWC,EAAAA,GAAOF,CAAAA,CACZA,CAAAA,CAAI,cAAA,CAAeE,CAAG,CAAA,EAAK,CAACH,CAAAA,CAAK,QAAA,CAASG,CAAmB,CAAA,EAAA,CAC/DD,CAAAA,CAAOC,CAAqC,CAAA,CAAIF,CAAAA,CAAIE,CAAG,CAAA,CAAA,CAG3D,OAAOD,CACT,CAcA,MAAA,SAAsBE,CAAAA,CACpBC,CAAAA,CACAC,CAAAA,CAC8B,CAC9B,IAAMC,CAAAA,CAAiB,KAAA,CAAM,IAAA,CAAK,CAAE,MAAA,CAAQD,CAAE,CAAA,CAAG,CAAA,CAAA,EAAM,CAAC,CAAC,CAAA,CACnDE,CAAAA,CAA4B,CAAC,CAAA,CAC7BC,CAAAA,CAAWJ,CAAAA,CAAS,MAAA,CAAO,aAAa,CAAA,CAAE,CAAA,CAC5CK,CAAAA,CAAO,CAAA,CAAA,CAELC,CAAAA,CAAS,MAAA,QAAA,CAAA,CAAiBC,CAAAA,CAAkC,CAChE,GAAA,CAAA,CAAA,CAAA,CACE,EAAA,CAAIL,CAAAA,CAAQK,CAAK,CAAA,CAAE,MAAA,CAAS,CAAA,CAC1B,MAAML,CAAAA,CAAQK,CAAK,CAAA,CAAE,KAAA,CAAM,CAAA,CAAA,IACtB,CAAA,EAAA,CAAIF,CAAAA,CACT,KAAA,CAEA,MAAM,IAAI,OAAA,CAAeG,CAAAA,EAAYL,CAAAA,CAAU,IAAA,CAAKK,CAAO,CAAC,CAAA,CAGlE,CAAA,CACA,OAAA,KAAA,CAAO,KAAA,CAAA,CAAA,EAAY,CACjB,IAAA,KAAA,CAAA,IAAiBC,EAAAA,EAAQ,CACvB,CAAC,MAAA,CAAO,aAAa,CAAA,CAAG,CAAA,CAAA,EAAML,CAChC,CAAA,CAAG,CACD,GAAA,CAAA,IAAWM,EAAAA,GAAUR,CAAAA,CACnBQ,CAAAA,CAAO,IAAA,CAAKD,CAAI,CAAA,CAGlB,GAAA,CAAA,CAAON,CAAAA,CAAU,MAAA,CAAS,CAAA,CAAA,CACxBA,CAAAA,CAAU,KAAA,CAAM,CAAA,CAAG,CAEvB,CAEA,GAAA,CADAE,CAAAA,CAAO,CAAA,CAAA,CACAF,CAAAA,CAAU,MAAA,CAAS,CAAA,CAAA,CACxBA,CAAAA,CAAU,KAAA,CAAM,CAAA,CAAG,CAEvB,CAAA,CAAA,CAAG,CAAA,CAEI,KAAA,CAAM,IAAA,CAAK,CAAE,MAAA,CAAQF,CAAE,CAAA,CAAG,CAACU,CAAAA,CAAGC,CAAAA,CAAAA,EAAMN,CAAAA,CAAOM,CAAC,CAAC,CACtD,CCtEO,IAAMC,CAAAA,CAAO,CAClB,SAAA,CAAW,WAAA,CACX,KAAA,CAAO,OAAA,CACP,IAAA,CAAM,MAAA,CACN,OAAA,CAAS,SAAA,CACT,WAAA,CAAa,aAAA,CACb,gBAAA,CAAkB,kBACpB,CAAA,CCQO,SAASC,CAAAA,CACdC,CAAAA,CACAC,CAAAA,CAC6B,CAC7B,GAAM,CAAE,IAAA,CAAAC,CAAAA,CAAM,WAAA,CAAAC,CAAAA,CAAa,GAAGC,CAAiB,CAAA,CAAIJ,CAAAA,CAC7CK,CAAAA,CAAyD,CAC7D,IAAA,CAAAH,CACF,CAAA,CACMI,CAAAA,CAAyC,CAC7C,oCAAIL,CAAAA,6BAAQ,WAAA,SAAa,CAAC,GAAA,CAC1B,CACE,IAAA,CAAMC,CAAAA,CACN,WAAA,kBAAaC,CAAAA,SAAe,KAAA,GAAA,CAC5B,UAAA,CAAYC,CACd,CACF,CAAA,CACA,MAAO,CACL,GAAGH,CAAAA,CACH,aAAA,CAAAI,CAAAA,CACA,SAAA,CAAAC,CACF,CACF,CAEO,SAASC,CAAAA,CACdP,CAAAA,CACAC,CAAAA,CACiC,CACjC,GAAM,CAAE,IAAA,CAAAC,CAAAA,CAAM,WAAA,CAAAC,CAAAA,CAAa,GAAGC,CAAiB,CAAA,CAAIJ,CAAAA,CAC7CQ,CAAAA,CAAqD,CACzD,IAAA,CAAM,UAAA,CA
CN,QAAA,CAAU,CAAE,IAAA,CAAAN,CAAK,CACnB,CAAA,CACMO,CAAAA,CAAqC,CACzC,CACE,IAAA,CAAM,UAAA,CACN,QAAA,CAAU,CACR,IAAA,CAAMP,CAAAA,CACN,WAAA,CAAaC,CAAAA,CACb,UAAA,CAAYC,CACd,CACF,CAAA,CACA,oCAAIH,CAAAA,qBAAO,KAAA,6BAAO,GAAA,mBACfS,CAAAA,EAAAA,CAAqC,CACpC,IAAA,CAAMA,CAAAA,CAAK,IAAA,CACX,QAAA,CAAU,CACR,IAAA,CAAMA,CAAAA,CAAK,QAAA,CAAS,IAAA,CACpB,WAAA,CAAaA,CAAAA,CAAK,QAAA,CAAS,WAAA,CAC3B,UAAA,CAAYA,CAAAA,CAAK,QAAA,CAAS,UAC5B,CACF,CAAA,CACF,GAAA,SAAK,CAAC,GACR,CAAA,CACA,MAAO,CACL,GAAGT,CAAAA,CACH,WAAA,CAAAO,CAAAA,CACA,KAAA,CAAAC,CACF,CACF,CAEO,SAASE,CAAAA,CACdX,CAAAA,CACAC,CAAAA,CACiC,CACjC,MAAO,CACL,GAAGA,CAAAA,CACH,QAAA,CAAU,CACR,CACE,IAAA,CAAM,QAAA,CACN,OAAA,CAAS,CAAA;AAAA;AAAA;AAAA;AAAA;AAAA,uBAAA,EAKQD,CAAAA,CAAW,WAAW,CAAA;AAAA,uBAAA,EACtB,IAAA,CAAK,SAAA,CAAUA,CAAU,CAAC,CAAA;AAAA,QAAA,CAE7C,CAAA,CACA,GAAGC,CAAAA,CAAO,QACZ,CACF,CACF,CAGO,SAASW,CAAAA,CACdZ,CAAAA,CACAC,CAAAA,CACiC,CACjC,MAAO,CACL,GAAGA,CAAAA,CACH,QAAA,CAAU,CACR,CACE,IAAA,CAAM,QAAA,CACN,OAAA,CAAS,CAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,uBAAA,EAqBQD,CAAAA,CAAW,WAAW,CAAA;AAAA,uBAAA,EACtB,IAAA,CAAK,SAAA,CAAUA,CAAU,CAAC,CAAA;AAAA,QAAA,CAE7C,CAAA,CACA,GAAGC,CAAAA,CAAO,QACZ,CACF,CACF,CAGO,SAASY,CAAAA,CACdb,CAAAA,CACAC,CAAAA,CAC6B,CAC7B,MAAO,CACL,GAAGA,CAAAA,CACH,eAAA,CAAiB,CAAE,IAAA,CAAM,aAAc,CAAA,CACvC,QAAA,CAAU,CACR,CACE,IAAA,CAAM,QAAA,CACN,OAAA,CAAS,CAAA;AAAA;AAAA;AAAA;AAAA;AAAA,uBAAA,EAKQD,CAAAA,CAAW,WAAW,CAAA;AAAA,uBAAA,EACtB,IAAA,CAAK,SAAA,CAAUA,CAAU,CAAC,CAAA;AAAA,QAAA,CAE7C,CAAA,CACA,GAAGC,CAAAA,CAAO,QACZ,CACF,CACF,CAGO,SAASa,CAAAA,CACdd,CAAAA,CACAC,CAAAA,CAC+B,CAC/B,MAAO,CACL,GAAGA,CAAAA,CACH,eAAA,CAAiB,CACf,IAAA,CAAM,aAAA,CACN,MAAA,CAAQtB,CAAAA,CAAK,CAAC,MAAA,CAAQ,aAAa,CAAA,CAAGqB,CAAU,CAClD,CAAA,CACA,QAAA,CAAU,CACR,CACE,IAAA,CAAM,QAAA,CACN,OAAA,CAAS,CAAA;AAAA;AAAA;AAAA;AAAA;AAAA,uBAAA,EAKQA,CAAAA,CAAW,WAAW,CAAA;AAAA,QAAA,CAEzC,CAAA,CACA,GAAGC,CAAAA,CAAO,QACZ,CACF,CACF,CCjLA,SAGgBc,CAAAA,CAId,CACA,cAAA,CAAgB,CAAE,IAAA,CAAAb,CAAAA,CAAM,MAAA,CAAAc,CAAAA,CAAQ,WAAA,CAAAb,CAAAA,CAAc,EAAG,CAAA,CACjD,IAAA,CAAAc,CAAAA,CACA,MAAA,CAAAhB,CACF,CAAA,CAI+B,CAC7B,IAAMiB,CAAAA,CAAWhB,CAAAA,CAAK,OAAA,CAAQ,eAAA,CAAiB,GAAG,CAAA,CAAE,OAAA,CAAQ,KAAA,CAAO,GAAG,CAAA,CAKhEiB,CAAAA,CAAe,CAAA,CAAA,YAAA,CAAaH,CAAM,CAAA,CAMxC,OAAOG,CAAAA,CAAW,OAAA,CAClB,IAAMnB,CAAAA,CAAa,CACjB,IAAA,CAAMkB,CAAAA,CACN,WAAA,CAAAf,CAAAA,CACA,GAAGgB,CACL,CAAA,CAQA,OAAIF,CAAAA,GAASnB,CAAAA,CAAK,SAAA,CACTC,CAAAA,CAA0BC,CAAAA,CAAYC,CAAM,CAAA,CAGjDgB,CAAAA,GAASnB,CAAAA,CAAK,KAAA,CACTS,CAAAA,CAA8BP,CAAAA,CAAYC,CAAM,CAAA,CAGrDgB,CAAAA,GAASnB,CAAAA,CAAK,IAAA,CACTe,CAAAA,CAA0Bb,CAAAA,CAAYC,CAAM,CAAA,CAGjDgB,CAAAA,GAASnB,CAAAA,CAAK,WAAA,CACTgB,CAAAA,CAA4Bd,CAAAA,CAAYC,CAAM,CAAA,CAGnDgB,CAAAA,GAASnB,CAAAA,CAAK,OAAA,CACTa,CAAAA,CAA8BX,CAAAA,CAAYC,CAAM,CAAA,CAGrDgB,CAAAA,GAASnB,CAAAA,CAAK,gBAAA,CACTc,CAAAA,CAAsCZ,CAAAA,CAAYC,CAAM,CAAA,CAG1DU,CAAAA,CAA8BX,CAAAA,CAAYC,CAAM,CACzD,CCpEO,IAAMH,CAAAA,CAAqBA,CAAAA,CAErBsB,CAAAA,CAA0B,CACrC,CAACtB,CAAAA,CAAK,gBAAgB,CAAA,CAAGuB,mBAC3B,CAAA,CAEaC,CAAAA,CAAY,CACvB,GAAA,CAAK,KAAA,CACL,QAAA,CAAU,UAAA,CACV,QAAA,CAAU,UAAA,CACV,SAAA,CAAW,WAAA,CACX,IAAA,CAAM,MAAA,CACN,KAAA,CAAO,OACT,CAAA,CAIaC,CAAAA,CAET,CACF,CAACD,CAAAA,CAAU,KAAK,CAAA,CAAG,CAACxB,CAAAA,CAAK,SAAA,CAAWA,CAAAA,CAAK,KAAA,CAAOA,CAAAA,CAAK,IAAA,CAAMA,CAAAA,CAAK,WAAA,CAAaA,CAAAA,CAAK,OAAO,CAAA,CACzF,CAACwB,CAAAA,CAAU,GAAG,CAAA,CAAG,CAACxB,CAAAA,CAAK,SAAA,CAAWA,CAAAA,CAAK,KAAA,CAAOA,CAAAA,CAAK,IAAA,CAAMA,CAAAA,CAAK,OAAO,CAAA,CACrE,CAACwB,CAAAA,CAAU,QAAQ,CAAA,CAAG,CAACxB,CAAAA,CAAK,KAAA,CAAOA,CAAAA,CAAK,IAAA,CAAMA,CAA
AA,CAAK,WAAA,CAAaA,CAAAA,CAAK,OAAO,CAAA,CAC5E,CAACwB,CAAAA,CAAU,QAAQ,CAAA,CAAG,CAACxB,CAAAA,CAAK,KAAA,CAAOA,CAAAA,CAAK,IAAA,CAAMA,CAAAA,CAAK,WAAA,CAAaA,CAAAA,CAAK,OAAO,CAAA,CAC5E,CAACwB,CAAAA,CAAU,SAAS,CAAA,CAAG,CAACxB,CAAAA,CAAK,OAAA,CAASA,CAAAA,CAAK,KAAK,CAAA,CAChD,CAACwB,CAAAA,CAAU,IAAI,CAAA,CAAG,CAACxB,CAAAA,CAAK,KAAA,CAAOA,CAAAA,CAAK,SAAA,CAAWA,CAAAA,CAAK,OAAO,CAC7D,CAAA,CAEa0B,CAAAA,CAAwB,CACnC,CAACF,CAAAA,CAAU,QAAQ,CAAA,CAAG,wBAAA,CACtB,CAACA,CAAAA,CAAU,QAAQ,CAAA,CAAG,kBAAA,CACtB,CAACA,CAAAA,CAAU,GAAG,CAAA,CAAG,gBAAA,CACjB,CAACA,CAAAA,CAAU,SAAS,CAAA,CAAG,mBAAA,CACvB,CAACA,CAAAA,CAAU,IAAI,CAAA,CAAG,cACpB,CAAA,CAEaG,CAAAA,CAA+B,CAC1C,CAACH,CAAAA,CAAU,IAAI,CAAA,CAAG,CAChB,CAACxB,CAAAA,CAAK,KAAK,CAAA,CAAG,QAAA,CAGZG,CAAAA,CAA6D,CAC7D,OACEA,CAAAA,CAAO,KAAA,EACPA,CAAAA,CAAO,KAAA,CAAM,IAAA,CAAMS,CAAAA,EAAoCA,CAAI,CAAA,EAC3DT,CAAAA,CAAO,MAAA,EAEP,OAAA,CAAQ,IAAA,CAAK,8EAA8E,CAAA,CACpFA,CAGX,CACF,CAAA,CACA,CAACqB,CAAAA,CAAU,QAAQ,CAAA,CAAG,CACpB,CAACxB,CAAAA,CAAK,WAAW,CAAA,CAAG,QAAA,CAGlBG,CAAAA,CAAmE,CAEnE,OAAIA,CAAAA,CAAO,eAAA,EAAmB,sBAAA,GAA0BA,CAAAA,CAAO,eAAA,CAAgB,MAAA,CACtE,CACL,GAAGA,CAAAA,CACH,eAAA,CAAiB,CACf,GAAGA,CAAAA,CAAO,eAAA,CACV,MAAA,CAAQtB,CAAAA,CAAK,CAAC,sBAAsB,CAAA,CAAGsB,CAAAA,CAAO,eAAA,CAAgB,MAAM,CACtE,CACF,CAAA,CAEKA,CACT,CAAA,CACA,CAACH,CAAAA,CAAK,KAAK,CAAA,CAAG,QAAA,CAGZG,CAAAA,CAA6D,CAC7D,OACEA,CAAAA,CAAO,KAAA,EACPA,CAAAA,CAAO,KAAA,CAAM,IAAA,CAAMS,CAAAA,kBAAoCA,CAAAA,qBAAK,QAAA,6BAAU,YAAU,CAAA,CAEzE,CACL,GAAGT,CAAAA,CACH,KAAA,CAAOA,CAAAA,CAAO,KAAA,CAAM,GAAA,CAAKS,CAAAA,kBACnBA,CAAAA,qBAAK,QAAA,6BAAU,YAAA,CACV,CACL,GAAGA,CAAAA,CACH,QAAA,CAAU,CACR,GAAGA,CAAAA,CAAK,QAAA,CACR,UAAA,CAAY/B,CAAAA,CAAK,CAAC,sBAAsB,CAAA,CAAG+B,CAAAA,CAAK,QAAA,CAAS,UAAU,CACrE,CACF,CAAA,CAEKA,CACR,CACH,CAAA,CAEKT,CACT,CACF,CAAA,CACA,CAACqB,CAAAA,CAAU,GAAG,CAAA,CAAG,CAAC,CAAA,CAClB,CAACA,CAAAA,CAAU,QAAQ,CAAA,CAAG,CAAC,CAAA,CACvB,CAACA,CAAAA,CAAU,SAAS,CAAA,CAAG,CAAC,CAAA,CACxB,CAACA,CAAAA,CAAU,KAAK,CAAA,CAAG,CAAC,CACtB,CAAA,CAEaI,EAAAA,CAAoC,CAC/C,CAACJ,CAAAA,CAAU,KAAK,CAAA,CAAG,CACjB,CAACxB,CAAAA,CAAK,SAAS,CAAA,CAAG,CAAC,GAAG,CAAA,CACtB,CAACA,CAAAA,CAAK,KAAK,CAAA,CAAG,CAAC,GAAG,CAAA,CAClB,CAACA,CAAAA,CAAK,IAAI,CAAA,CAAG,CAAC,GAAG,CAAA,CACjB,CAACA,CAAAA,CAAK,OAAO,CAAA,CAAG,CAAC,GAAG,CAAA,CACpB,CAACA,CAAAA,CAAK,WAAW,CAAA,CAAG,CAAC,GAAG,CAAA,CACxB,CAACA,CAAAA,CAAK,gBAAgB,CAAA,CAAG,CAAC,GAAG,CAC/B,CAAA,CACA,CAACwB,CAAAA,CAAU,GAAG,CAAA,CAAG,CACf,CAACxB,CAAAA,CAAK,SAAS,CAAA,CAAG,CAAC,GAAG,CAAA,CACtB,CAACA,CAAAA,CAAK,KAAK,CAAA,CAAG,CAAC,GAAG,CAAA,CAClB,CAACA,CAAAA,CAAK,IAAI,CAAA,CAAG,CAAC,GAAG,CAAA,CACjB,CAACA,CAAAA,CAAK,OAAO,CAAA,CAAG,CAAC,GAAG,CACtB,CAAA,CACA,CAACwB,CAAAA,CAAU,QAAQ,CAAA,CAAG,CACpB,CAACxB,CAAAA,CAAK,OAAO,CAAA,CAAG,CAAC,GAAG,CAGtB,CAAA,CACA,CAACwB,CAAAA,CAAU,QAAQ,CAAA,CAAG,CACpB,CAACxB,CAAAA,CAAK,OAAO,CAAA,CAAG,CAAC,GAAG,CAGtB,CAAA,CACA,CAACwB,CAAAA,CAAU,SAAS,CAAA,CAAG,CACrB,CAACxB,CAAAA,CAAK,OAAO,CAAA,CAAG,CAAC,GAAG,CAAA,CACpB,CAACA,CAAAA,CAAK,KAAK,CAAA,CAAG,CAAC,GAAG,CACpB,CAAA,CACA,CAACwB,CAAAA,CAAU,IAAI,CAAA,CAAG,CAChB,CAACxB,CAAAA,CAAK,KAAK,CAAA,CAAG,CAAC,GAAG,CAAA,CAClB,CAACA,CAAAA,CAAK,OAAO,CAAA,CAAG,CAAC,GAAG,CAAA,CACpB,CAACA,CAAAA,CAAK,gBAAgB,CAAA,CAAG,CAAC,+BAA+B,CAC3D,CACF,CAAA,CC7IO,IAAM6B,CAAAA,CAAN,MAAA,QAA8B,KAAM,CAEzC,WAAA,CACEC,CAAAA,CACgBC,CAAAA,CAChB,CACA,KAAA,CAAMD,CAAO,CAAA,CAFG,IAAA,CAAA,KAAA,CAAAC,CAAAA,CAGhB,IAAA,CAAK,IAAA,CAAO,iBACd,CACF,CAAA,CAGaC,CAAAA,CAAN,MAAA,QAAqCH,CAAgB,CAC1D,WAAA,CAAYC,CAAAA,CAAU,yBAAA,CAA2BC,CAAAA,CAAiB,CAChE,KAAA,CAAMD,CAAAA,CAASC,CAAK,CAAA,CACpB,IAAA,CAAK,IAAA,CAAO,wBACd,CACF,CAAA,CAGaE,CAAAA,CAAN,MAAA,QAA6BJ,CAAgB,CAClD,WAAA,CAAYC,CAAAA
,CAAiBC,CAAAA,CAAiB,CAC5C,KAAA,CAAMD,CAAAA,CAASC,CAAK,CAAA,CACpB,IAAA,CAAK,IAAA,CAAO,gBACd,CACF,CAAA,CAGaG,CAAAA,CAAN,MAAA,QAAgCL,CAAgB,CACrD,WAAA,CAAYC,CAAAA,CAAiBC,CAAAA,CAAiB,CAC5C,KAAA,CAAMD,CAAAA,CAASC,CAAK,CAAA,CACpB,IAAA,CAAK,IAAA,CAAO,mBACd,CACF,CAAA,CAGaI,CAAAA,CAAN,MAAA,QAA8BN,CAAgB,CACnD,WAAA,CACkBO,CAAAA,CAChBL,CAAAA,CACA,CACA,KAAA,CAAM,iBAAA,CAAmBA,CAAK,CAAA,CAHd,IAAA,CAAA,MAAA,CAAAK,CAAAA,CAIhB,IAAA,CAAK,IAAA,CAAO,iBACd,CACF,CAAA,CNZA,IAAMC,CAAAA,CAAsB,CAAA,CAOtBC,CAAAA,CAAN,KAAoB,CAmBlB,WAAA,CAAY,CACV,MAAA,CAAAC,CAAAA,CACA,IAAA,CAAApB,CAAAA,CACA,KAAA,CAAAqB,CAAAA,CAAQ,CAAA,CAAA,CACR,MAAA,CAAAC,CAAAA,CAAS,KAAA,CAAA,CACT,cAAA,CAAAC,CAAAA,CAAiB,CAAA,CACnB,CAAA,CAAwB,CArBxB,IAAA,CAAS,KAAA,CAAiB,CAAA,CAAA,CAC1B,IAAA,CAAS,cAAA,CAA0B,CAAA,CAAA,CA8bnC,IAAA,CAAO,IAAA,CAAO,CACZ,WAAA,CAAa,CACX,MAAA,CAAQ,KAAA,CAKNvC,CAAAA,CACAwC,CAAAA,CAAAA,EAC4D,CAC5D,EAAA,CAAI,IAAA,CAAK,qCAAA,CAAsCxC,CAAM,CAAA,CACnD,OAAIA,CAAAA,CAAO,MAAA,CACF,IAAA,CAAK,oBAAA,CAAqBA,CAAAA,CAAQwC,CAAc,CAAA,CAKhD,IAAA,CAAK,sBAAA,CAAuBxC,CAAAA,CAAQwC,CAAc,CAAA,CAKtD,EAAA,iBAAI,IAAA,uBAAK,MAAA,uBAAO,IAAA,+BAAM,WAAA,+BAAa,QAAA,CAKxC,OAHE,IAAA,CAAK,gBAAA,CAAiBxC,CAAM,CAAA,CAC1B,MAAM,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,WAAA,CAAY,MAAA,CAAOA,CAAAA,CAAQwC,CAAc,CAAA,CAChE,MAAM,IAAA,CAAK,MAAA,CAAO,IAAA,CAAK,WAAA,CAAY,MAAA,CAAOxC,CAAAA,CAAQwC,CAAc,CAAA,CAGpE,MAAM,IAAIX,CAAAA,CAAuB,gCAAgC,CAErE,CACF,CACF,CAAA,CA1cE,EAAA,CAAI,CAACY,CAAAA,CAAgBL,CAAM,CAAA,EAAK,CAAA,CAAEA,EAAAA,WAAkBM,gBAAAA,CAAAA,CAClD,MAAM,IAAIb,CAAAA,CAAuB,8CAA8C,CAAA,CAE7EO,EAAAA,WAAkBM,gBAAAA,CACpB,IAAA,CAAK,MAAA,CAASN,CAAAA,CAEd,IAAA,CAAK,MAAA,CAASA,CAAAA,CAEhB,IAAA,CAAK,IAAA,CAAOpB,CAAAA,CACZ,IAAA,CAAK,KAAA,CAAQqB,CAAAA,CACb,IAAA,CAAK,cAAA,CAAiBE,CAAAA,CACtB,IAAA,CAAK,MAAA,kBAASD,CAAAA,SAAU,KAAA,GAAA,CACxB,IAAA,CAAK,QAAA,CACH,uBAAO,IAAA,uBAAK,MAAA,+BAAQ,SAAA,EAAY,QAAA,iBAC9B,IAAA,uBAAK,MAAA,+BAAQ,OAAA,uBAAQ,QAAA,qBAASf,CAAAA,CAAsB,QAAQ,GAAA,CAAIF,CAAAA,CAAU,QAAA,iBACxE,IAAA,uBAAK,MAAA,+BAAQ,OAAA,uBAAQ,QAAA,qBAASE,CAAAA,CAAsB,QAAQ,GAAA,CAAIF,CAAAA,CAAU,QAAA,iBAC1E,IAAA,uBAAK,MAAA,+BAAQ,OAAA,uBAAQ,QAAA,qBAASE,CAAAA,CAAsB,GAAG,GAAA,CAAIF,CAAAA,CAAU,GAAA,iBACrE,IAAA,uBAAK,MAAA,+BAAQ,OAAA,uBAAQ,QAAA,qBAASE,CAAAA,CAAsB,SAAS,GAAA,CAAIF,CAAAA,CAAU,SAAA,iBAC3E,IAAA,uBAAK,MAAA,+BAAQ,OAAA,uBAAQ,QAAA,qBAASE,CAAAA,CAAsB,IAAI,GAAA,CAAIF,CAAAA,CAAU,IAAA,CACtEA,CAAAA,CAAU,KAAA,CACZA,CAAAA,CAAU,KAAA,CACd,IAAA,CAAK,eAAA,CAAgB,CACvB,CAMQ,eAAA,CAAA,CAAkB,CACxB,IAAMsB,CAAAA,CAAkBrB,CAAAA,CAAyB,IAAA,CAAK,QAAQ,CAAA,CAAE,QAAA,CAAS,IAAA,CAAK,IAAI,CAAA,CAC9E,IAAA,CAAK,QAAA,GAAaD,CAAAA,CAAU,KAAA,EAC9B,IAAA,CAAK,GAAA,CAAI,OAAA,CAAS,2CAA2C,CAAA,CAE1DsB,CAAAA,EACH,IAAA,CAAK,GAAA,CAAI,MAAA,CAAQ,CAAA,KAAA,EAAQ,IAAA,CAAK,IAAI,CAAA,kCAAA,EAAqC,IAAA,CAAK,QAAQ,CAAA,CAAA;AOhG3EC,CAAAA","file":"/Users/m1mbp/tools/instructor-stream-js/packages/instructor-stream/dist/index.cjs","sourcesContent":[null,"import OpenAI from 'openai'\nimport { Stream } from 'openai/streaming'\nimport { z, ZodError } from 'zod'\nimport { fromZodError } from 'zod-validation-error'\nimport {\n MODE_TO_RESPONSE_PARSER,\n NON_OAI_PROVIDER_URLS,\n Provider,\n PROVIDER_PARAMS_TRANSFORMERS,\n PROVIDER_SUPPORTED_MODES,\n PROVIDERS,\n} from './constants/providers.ts'\nimport { iterableTee } from './lib'\nimport { OAIResponseParser, OAIStream, withResponseModel } from './stream'\nimport ZodStream from './stream/structured-stream.client.js'\nimport {\n ChatCompletionCreateParamsWithModel,\n ClientTypeChatCompletionParams,\n ClientTypeChatCompletionRequestOptions,\n CompletionMeta,\n GenericChatCompletion,\n GenericClient,\n 
InstructorConfig,\n LogLevel,\n Mode,\n OpenAILikeClient,\n ReturnTypeBasedOnParams,\n} from './types'\nimport {\n ValidationError,\n RetryableError,\n NonRetryableError,\n UnsupportedClientError,\n} from './errors'\n\nconst MAX_RETRIES_DEFAULT = 0\n\n/**\n * The Instructor class provides a unified interface for interacting with OpenAI-like clients,\n * supporting both standard and streaming chat completions with schema validation and error handling.\n * It manages provider-specific options, logging, and retry logic for robust API usage.\n */\nclass Instructor<C> {\n readonly client: OpenAILikeClient<C>\n readonly mode: Mode\n readonly provider: Provider\n readonly debug: boolean = false\n readonly retryAllErrors: boolean = false\n readonly logger?: <T extends unknown[]>(level: LogLevel, ...args: T) => void\n\n /**\n * Initializes a new Instructor instance for a given OpenAI-like client and configuration.\n * Validates the client and sets up provider-specific options and logging.\n *\n * Args:\n * client (OpenAILikeClient): The OpenAI-compatible client instance.\n * mode (Mode): The operation mode for completions.\n * debug (boolean, optional): Enables debug logging if true.\n * logger (function, optional): Custom logger function.\n * retryAllErrors (boolean, optional): If true, retries all errors.\n */\n constructor({\n client,\n mode,\n debug = false,\n logger = undefined,\n retryAllErrors = false,\n }: InstructorConfig<C>) {\n if (!isGenericClient(client) && !(client instanceof OpenAI)) {\n throw new UnsupportedClientError('Client does not match the required structure')\n }\n if (client instanceof OpenAI) {\n this.client = client as OpenAI\n } else {\n this.client = client as C & GenericClient\n }\n this.mode = mode\n this.debug = debug\n this.retryAllErrors = retryAllErrors\n this.logger = logger ?? undefined\n this.provider =\n typeof this.client?.baseURL === 'string' ?\n this.client?.baseURL.includes(NON_OAI_PROVIDER_URLS.ANYSCALE) ? PROVIDERS.ANYSCALE\n : this.client?.baseURL.includes(NON_OAI_PROVIDER_URLS.TOGETHER) ? PROVIDERS.TOGETHER\n : this.client?.baseURL.includes(NON_OAI_PROVIDER_URLS.OAI) ? PROVIDERS.OAI\n : this.client?.baseURL.includes(NON_OAI_PROVIDER_URLS.ANTHROPIC) ? PROVIDERS.ANTHROPIC\n : this.client?.baseURL.includes(NON_OAI_PROVIDER_URLS.GROQ) ? 
PROVIDERS.GROQ\n : PROVIDERS.OTHER\n : PROVIDERS.OTHER\n this.validateOptions()\n }\n\n /**\n * Validates the current provider and mode configuration.\n * Logs warnings if the mode is not supported by the provider.\n */\n private validateOptions() {\n const isModeSupported = PROVIDER_SUPPORTED_MODES[this.provider].includes(this.mode)\n if (this.provider === PROVIDERS.OTHER) {\n this.log('debug', 'Unknown provider - cant validate options.')\n }\n if (!isModeSupported) {\n this.log('warn', `Mode ${this.mode} may not be supported by provider ${this.provider}`)\n }\n }\n\n /**\n * Logs messages at the specified log level using the configured logger or console.\n * Skips debug logs if debug mode is disabled.\n *\n * Args:\n * level (LogLevel): The severity level of the log.\n * ...args: Additional arguments to log.\n */\n private log<T extends unknown[]>(level: LogLevel, ...args: T) {\n if (this.logger) {\n this.logger(level, ...args)\n }\n if (!this.debug && level === 'debug') {\n return\n }\n const timestamp = new Date().toISOString()\n switch (level) {\n case 'debug':\n console.debug(`[Instructor:DEBUG] ${timestamp}:`, ...args)\n break\n case 'info':\n console.info(`[Instructor:INFO] ${timestamp}:`, ...args)\n break\n case 'warn':\n console.warn(`[Instructor:WARN] ${timestamp}:`, ...args)\n break\n case 'error':\n console.error(`[Instructor:ERROR] ${timestamp}:`, ...args)\n break\n }\n }\n\n /**\n * Executes a standard (non-streaming) chat completion with schema validation and retry logic.\n * Returns the validated response data and associated metadata.\n *\n * Args:\n * params (ChatCompletionCreateParamsWithModel): Parameters for the chat completion, including the response model.\n * requestOptions (optional): Additional request options for the client.\n *\n * Returns:\n * Promise<{ data: z.output<T>[]; _meta: CompletionMeta }>: The validated completion data and metadata.\n *\n * Raises:\n * ValidationError: If the response does not match the schema.\n * RetryableError: If a retryable error occurs during completion.\n * NonRetryableError: If a non-retryable error occurs.\n */\n private async chatCompletionStandard<T extends z.ZodType>(\n {\n max_retries = MAX_RETRIES_DEFAULT,\n response_model,\n ...params\n }: ChatCompletionCreateParamsWithModel<T>,\n requestOptions?: ClientTypeChatCompletionRequestOptions<C>\n ): Promise<{ data: z.output<T>[]; _meta: CompletionMeta }> {\n let attempts = 0\n let validationIssues = ''\n let lastMessage: OpenAI.ChatCompletionMessageParam | null = null\n const paramsTransformer = (\n PROVIDER_PARAMS_TRANSFORMERS?.[this.provider] as Record<string, unknown> | undefined\n )?.[this.mode as unknown as string] as ((p: unknown) => unknown) | undefined\n let completionParams = withResponseModel({\n params: {\n ...params,\n stream: params.stream ?? false,\n } as OpenAI.ChatCompletionCreateParams,\n mode: this.mode,\n response_model,\n })\n if (typeof paramsTransformer === 'function') {\n completionParams = paramsTransformer(completionParams as unknown) as typeof completionParams\n }\n\n const makeCompletionCall = async () => {\n let resolvedParams = completionParams\n if (validationIssues?.length > 0) {\n resolvedParams = {\n ...completionParams,\n messages: [\n ...completionParams.messages,\n ...(lastMessage ? 
[lastMessage] : []),\n {\n role: 'user',\n content: `Please correct the function call; errors encountered:\\n ${validationIssues}`,\n },\n ],\n }\n }\n let completion\n try {\n if (this.client.chat?.completions?.create) {\n const result = await this.client.chat.completions.create(\n {\n ...resolvedParams,\n stream: false,\n },\n requestOptions\n )\n completion = result as GenericChatCompletion<typeof result>\n } else {\n throw new UnsupportedClientError('Unsupported client type -- no completion method found.')\n }\n this.log('debug', 'raw standard completion response: ', completion)\n } catch (error) {\n this.log(\n 'error',\n `Error making completion call - mode: ${this.mode} | Client base URL: ${this.client.baseURL} | with params:`,\n resolvedParams,\n `raw error`,\n error\n )\n throw new RetryableError(\n error instanceof Error ? error.message : 'Error making completion call',\n error\n )\n }\n const responseParser =\n MODE_TO_RESPONSE_PARSER?.[this.mode as unknown as keyof typeof MODE_TO_RESPONSE_PARSER] ??\n OAIResponseParser\n const parsedCompletion = responseParser(completion as OpenAI.Chat.Completions.ChatCompletion)\n\n try {\n const responseJson = parsedCompletion.json ?? parsedCompletion\n const data = JSON.parse(responseJson) as z.infer<T> & {\n _meta?: CompletionMeta\n thinking?: string\n }\n return {\n data: [data],\n _meta: {\n usage: completion?.usage ?? undefined,\n thinking: parsedCompletion?.thinking ?? undefined,\n },\n }\n } catch (error) {\n this.log(\n 'error',\n 'failed to parse completion',\n parsedCompletion,\n this.mode,\n 'attempt: ',\n attempts,\n 'max attempts: ',\n max_retries\n )\n throw new RetryableError('Failed to parse completion', error)\n }\n }\n\n const makeCompletionCallWithRetries = async () => {\n try {\n const data = await makeCompletionCall()\n const validation = await response_model.schema.safeParseAsync(data.data[0] as unknown)\n this.log('debug', response_model.name, 'Completion validation: ', validation)\n if (!validation.success) {\n if ('error' in validation && validation.error instanceof ZodError) {\n lastMessage = {\n role: 'assistant',\n content: JSON.stringify(data),\n }\n try {\n if (\n validation.error &&\n Array.isArray((validation.error as { issues?: unknown[] }).issues) &&\n (validation.error as { issues?: unknown[] }).issues!.length > 0\n ) {\n try {\n const errorForFormatting = validation.error as unknown as Parameters<\n typeof fromZodError\n >[0]\n validationIssues =\n fromZodError(errorForFormatting)?.message ?? 'Validation failed with issues'\n } catch {\n const firstMsg = validation.error.issues?.[0]?.message\n validationIssues = firstMsg ?? 'Validation failed with issues'\n }\n } else {\n validationIssues = 'Validation failed: error structure missing or invalid'\n this.log('debug', 'Validation error structure:', JSON.stringify(validation.error))\n }\n } catch (fromZodErrorException) {\n validationIssues = `Validation failed: ${\n validation.error?.issues?.[0]?.message ?? 
'unknown validation error'\n }`\n this.log('debug', 'fromZodError failed:', fromZodErrorException)\n this.log('debug', 'Original validation error:', JSON.stringify(validation.error))\n }\n // Propagate the original ZodError without introducing a new local error.\n throw new ValidationError(validation.error.issues, validation.error)\n } else {\n // Propagate non-Zod validation failure by rethrowing as-is to avoid masking upstream errors.\n // Use the correct caught variable from this catch scope.\n throw new NonRetryableError('Validation failed', validation.error)\n }\n }\n return { data: [validation.data], _meta: data?._meta ?? {} }\n } catch (error) {\n if (!this.retryAllErrors && !(error instanceof ValidationError)) {\n throw error\n }\n if (attempts < max_retries) {\n this.log(\n 'debug',\n `response model: ${response_model.name} - Retrying, attempt: `,\n attempts\n )\n this.log(\n 'warn',\n `response model: ${response_model.name} - Validation issues: `,\n validationIssues,\n ' - Attempt: ',\n attempts,\n ' - Max attempts: ',\n max_retries\n )\n attempts++\n return await makeCompletionCallWithRetries()\n } else {\n this.log(\n 'debug',\n `response model: ${response_model.name} - Max attempts reached: ${attempts}`\n )\n this.log(\n 'error',\n `response model: ${response_model.name} - Validation issues: `,\n validationIssues\n )\n throw error\n }\n }\n }\n return makeCompletionCallWithRetries()\n }\n\n /**\n * Executes a streaming chat completion, yielding partial results as they arrive.\n * Supports schema validation and collects usage metadata during the stream.\n *\n * Args:\n * params (ChatCompletionCreateParamsWithModel): Parameters for the chat completion, including the response model.\n * requestOptions (optional): Additional request options for the client.\n *\n * Returns:\n * AsyncGenerator<{ data: Partial<z.output<T>>[]; _meta: CompletionMeta }, void, unknown>: An async generator yielding partial completion data and metadata.\n */\n private async *chatCompletionStream<T extends z.ZodType>(\n { max_retries, response_model, ...params }: ChatCompletionCreateParamsWithModel<T>,\n requestOptions?: ClientTypeChatCompletionRequestOptions<C>\n ): AsyncGenerator<{ data: Partial<z.output<T>>[]; _meta: CompletionMeta }, void, unknown> {\n if (max_retries) {\n this.log('warn', 'max_retries is not supported for streaming completions')\n }\n const paramsTransformer = (\n PROVIDER_PARAMS_TRANSFORMERS?.[this.provider] as Record<string, unknown> | undefined\n )?.[this.mode as unknown as string] as ((p: unknown) => unknown) | undefined\n\n let completionParams = withResponseModel({\n params: {\n ...params,\n stream: true,\n } as OpenAI.ChatCompletionCreateParams,\n response_model,\n mode: this.mode,\n })\n\n if (typeof paramsTransformer === 'function') {\n completionParams = paramsTransformer(completionParams as unknown) as typeof completionParams\n }\n\n const streamClient = new ZodStream({\n debug: this.debug ?? 
false,\n })\n\n const checkForUsage = async (\n reader: Stream<OpenAI.ChatCompletionChunk> | AsyncIterable<OpenAI.ChatCompletionChunk>\n ) => {\n for await (const chunk of reader) {\n if ('usage' in chunk) {\n streamUsage = chunk.usage as CompletionMeta['usage']\n }\n }\n }\n let streamUsage: CompletionMeta['usage'] | undefined\n const structuredStream = await streamClient.create({\n completionPromise: async () => {\n if (this.client.chat?.completions?.create) {\n const completion = await this.client.chat.completions.create(\n {\n ...completionParams,\n stream: true,\n },\n requestOptions\n )\n this.log('debug', 'raw stream completion response: ', completion)\n if (\n this.provider === 'OAI' &&\n completionParams?.stream &&\n 'stream_options' in completionParams &&\n completion instanceof Stream\n ) {\n const [completion1, completion2] = completion.tee()\n checkForUsage(completion1)\n return OAIStream({\n res: completion2,\n })\n }\n if (\n this.provider !== 'OAI' &&\n completionParams?.stream &&\n (completion as unknown as { [Symbol.asyncIterator]?: unknown })?.[Symbol.asyncIterator]\n ) {\n const [completion1, completion2] = await iterableTee(\n completion as AsyncIterable<OpenAI.ChatCompletionChunk>,\n 2\n )\n checkForUsage(completion1)\n return OAIStream({\n res: completion2,\n })\n }\n return OAIStream({\n res: completion as unknown as AsyncIterable<OpenAI.ChatCompletionChunk>,\n })\n } else {\n throw new UnsupportedClientError('Unsupported client type')\n }\n },\n response_model: { schema: response_model.schema },\n })\n for await (const chunk of structuredStream) {\n yield {\n data: chunk.data,\n _meta: {\n usage: streamUsage ?? undefined,\n ...(chunk?._meta ?? {}),\n },\n }\n }\n }\n\n /**\n * Determines if the provided parameters include a response model for schema validation.\n * Used to distinguish between typed and untyped completion requests.\n *\n * Args:\n * params (ChatCompletionCreateParamsWithModel): The parameters to check.\n *\n * Returns:\n * boolean: True if the parameters include a response model, false otherwise.\n */\n private isChatCompletionCreateParamsWithModel<T extends z.ZodType>(\n params: ChatCompletionCreateParamsWithModel<T>\n ): params is ChatCompletionCreateParamsWithModel<T> {\n return 'response_model' in params\n }\n\n /**\n * Checks if the given parameters specify a standard streaming completion.\n * Returns true if the 'stream' property is set to true.\n *\n * Args:\n * params (OpenAI.ChatCompletionCreateParams): The parameters to check.\n *\n * Returns:\n * boolean: True if streaming is enabled, false otherwise.\n */\n private isStandardStream(\n params: OpenAI.ChatCompletionCreateParams\n ): params is OpenAI.ChatCompletionCreateParams {\n return 'stream' in params && params.stream === true\n }\n\n /**\n * Provides a unified interface for creating chat completions, supporting both standard and streaming modes.\n * Automatically selects the appropriate completion method based on the provided parameters.\n *\n * Args:\n * params: The parameters for the chat completion, with or without a response model.\n * requestOptions (optional): Additional request options for the client.\n *\n * Returns:\n * Promise or AsyncGenerator: The completion result, type depends on the parameters.\n *\n * Raises:\n * UnsupportedClientError: If the client does not support completions.\n */\n public chat = {\n completions: {\n create: async <\n T extends z.ZodType,\n P extends T extends z.ZodType ? 
ChatCompletionCreateParamsWithModel<T>\n : ClientTypeChatCompletionParams<OpenAILikeClient<C>> & { response_model: never },\n >(\n params: P,\n requestOptions?: ClientTypeChatCompletionRequestOptions<C>\n ): Promise<ReturnTypeBasedOnParams<typeof this.client, P>> => {\n if (this.isChatCompletionCreateParamsWithModel(params)) {\n if (params.stream) {\n return this.chatCompletionStream(params, requestOptions) as ReturnTypeBasedOnParams<\n typeof this.client,\n P & { stream: true }\n >\n } else {\n return this.chatCompletionStandard(params, requestOptions) as ReturnTypeBasedOnParams<\n typeof this.client,\n P\n >\n }\n } else if (this.client.chat?.completions?.create) {\n const result =\n this.isStandardStream(params) ?\n await this.client.chat.completions.create(params, requestOptions)\n : await this.client.chat.completions.create(params, requestOptions)\n return result as unknown as ReturnTypeBasedOnParams<OpenAILikeClient<C>, P>\n } else {\n throw new UnsupportedClientError('Completion method is undefined')\n }\n },\n },\n }\n}\n\nexport type InstructorClient<C> = Instructor<C> & OpenAILikeClient<C>\n\n/**\n * Creates an instance of the `Instructor` class.\n * @returns {InstructorClient} The extended OpenAI client.\n * @example import createInstructor from \"@instructor-ai/instructor\"\nimport OpenAI from \"openai\"\n\nconst OAI = new OpenAi({})\n\nconst client = createInstructor({\nclient: OAI,\nmode: \"TOOLS\",\n})\n * @param args\n * @returns\n */\nexport default function createInstructor<C>(args: InstructorConfig<C>): InstructorClient<C> {\n const instructor = new Instructor<C>(args)\n const instructorWithProxy = new Proxy(instructor, {\n get: (target, prop, receiver) => {\n if (prop in target) {\n return Reflect.get(target, prop, receiver)\n }\n return Reflect.get(target.client, prop, receiver)\n },\n })\n return instructorWithProxy as InstructorClient<C>\n}\n\nfunction isGenericClient(client: unknown): client is GenericClient {\n return (\n typeof client === 'object' &&\n client !== null &&\n 'chat' in client &&\n typeof client.chat === 'object' &&\n client.chat !== null &&\n 'completions' in client.chat &&\n typeof client.chat.completions === 'object' &&\n client.chat.completions !== null &&\n 'create' in client.chat.completions &&\n typeof client.chat.completions.create === 'function'\n )\n}\n","/**\n * Omits the specified keys from the given object.\n *\n * @template T The type of the object.\n * @template K The type of the keys to omit.\n * @param {K[]} keys The keys to omit.\n * @param {T} obj The object to omit from.\n * @returns {Omit<T, K>} The object with the specified keys omitted.\n */\nexport function omit<T extends object, K extends keyof T>(keys: K[], obj: T): Omit<T, K> {\n const result = {} as Omit<T, K>\n for (const key in obj) {\n if (obj.hasOwnProperty(key) && !keys.includes(key as unknown as K)) {\n result[key as unknown as Exclude<keyof T, K>] = obj[key] as unknown as T[Exclude<keyof T, K>]\n }\n }\n return result\n}\n\n/**\n * Creates `n` async generators that emit the elements of the given iterable in\n * the same order, but allows the consumer to iterate over the elements in\n * parallel. 
The returned generators are \"hot\", meaning that they will queue up\n * elements from the iterable even if no one is iterating over them.\n *\n * @template T The type of the elements in the iterable.\n * @param {AsyncIterable<T>} iterable The iterable to tee.\n * @param {number} n The number of async generators to create.\n * @returns {Promise<AsyncGenerator<T>[]>} A promise that resolves to an array of\n * `n` async generators.\n */\nexport async function iterableTee<T>(\n iterable: AsyncIterable<T>,\n n: number\n): Promise<AsyncGenerator<T>[]> {\n const buffers: T[][] = Array.from({ length: n }, () => [])\n const resolvers: (() => void)[] = []\n const iterator = iterable[Symbol.asyncIterator]()\n let done = false\n\n const reader = async function* (index: number): AsyncGenerator<T> {\n while (true) {\n if (buffers[index].length > 0) {\n yield buffers[index].shift()!\n } else if (done) {\n break\n } else {\n await new Promise<void>((resolve) => resolvers.push(resolve))\n }\n }\n }\n await (async () => {\n for await (const item of {\n [Symbol.asyncIterator]: () => iterator,\n }) {\n for (const buffer of buffers) {\n buffer.push(item)\n }\n\n while (resolvers.length > 0) {\n resolvers.shift()!()\n }\n }\n done = true\n while (resolvers.length > 0) {\n resolvers.shift()!()\n }\n })()\n\n return Array.from({ length: n }, (_, i) => reader(i))\n}\n","export const MODE = {\n FUNCTIONS: 'FUNCTIONS',\n TOOLS: 'TOOLS',\n JSON: 'JSON',\n MD_JSON: 'MD_JSON',\n JSON_SCHEMA: 'JSON_SCHEMA',\n THINKING_MD_JSON: 'THINKING_MD_JSON',\n} as const\n","import { omit } from '@/lib'\nimport {\n FunctionParamsReturnType,\n JsonModeParamsReturnType,\n JsonSchemaParamsReturnType,\n MessageBasedParamsReturnType,\n ParseParams,\n ToolFunctionParamsReturnType,\n} from '@/types'\nimport OpenAI from 'openai'\n\n// TODO: remove the deprecated `functions` and use tools instead\n// TODO: separate this logic into something that can be surfaced to the user, this is important for the system prompt\n// TODO: Which is now called the developer prompt\n// TODO: Also consider using the `responses` api instead of the Chat Completion API\nexport function OAIBuildFunctionParams<T extends OpenAI.ChatCompletionCreateParams>(\n definition: ParseParams,\n params: T\n): FunctionParamsReturnType<T> {\n const { name, description, ...definitionParams } = definition\n const function_call: OpenAI.ChatCompletionFunctionCallOption = {\n name,\n }\n const functions: OpenAI.FunctionDefinition[] = [\n ...(params?.functions ?? []),\n {\n name: name,\n description: description ?? undefined,\n parameters: definitionParams,\n },\n ]\n return {\n ...params,\n function_call,\n functions,\n }\n}\n\nexport function OAIBuildToolFunctionParams<T extends OpenAI.ChatCompletionCreateParams>(\n definition: ParseParams,\n params: T\n): ToolFunctionParamsReturnType<T> {\n const { name, description, ...definitionParams } = definition\n const tool_choice: OpenAI.ChatCompletionToolChoiceOption = {\n type: 'function',\n function: { name },\n }\n const tools: OpenAI.ChatCompletionTool[] = [\n {\n type: 'function',\n function: {\n name: name,\n description: description,\n parameters: definitionParams,\n },\n },\n ...(params.tools?.map(\n (tool): OpenAI.ChatCompletionTool => ({\n type: tool.type,\n function: {\n name: tool.function.name,\n description: tool.function.description,\n parameters: tool.function.parameters,\n },\n })\n ) ?? 
[]),\n ]\n return {\n ...params,\n tool_choice,\n tools,\n }\n}\n\nexport function OAIBuildMessageBasedParams<T extends OpenAI.ChatCompletionCreateParams>(\n definition: ParseParams,\n params: T\n): MessageBasedParamsReturnType<T> {\n return {\n ...params,\n messages: [\n {\n role: 'system',\n content: `\n Given a user prompt, you will return fully valid JSON based on the following description and schema.\n You will return no other prose. You will take into account any descriptions or required parameters within the schema\n and return a valid and fully escaped JSON object that matches the schema and those instructions.\n\n description: ${definition.description}\n json schema: ${JSON.stringify(definition)}\n `,\n },\n ...params.messages,\n ],\n }\n}\n\n// TODO: Surface this to the user and convert to the responses api\nexport function OAIBuildThinkingMessageBasedParams<T extends OpenAI.ChatCompletionCreateParams>(\n definition: ParseParams,\n params: T\n): MessageBasedParamsReturnType<T> {\n return {\n ...params,\n messages: [\n {\n role: 'system',\n content: `\n Given a user prompt, you will return fully valid JSON based on the provided description and schema.\n\n You will take into account any descriptions or required parameters within the schema\n and return a valid and fully escaped JSON object that matches the schema and those instructions.\n\n You will always return your full thought process in one <think> tag and then return the JSON response in a \\`\\`\\`json block after the </think> tag. Never include any prose or thinking process outside of the <think> tag.\n \n For example:\n <think>\n I am analyzing the input to extract the required information...\n </think>\n\n \\`\\`\\`json\n {\n \"result\": \"the actual json response\"\n }\n \\`\\`\\`\n\n \\n\\n\n\n description: ${definition.description}\n json schema: ${JSON.stringify(definition)}\n `,\n },\n ...params.messages,\n ],\n }\n}\n\n// TODO: deprecate some of these because they don't supports streaming and are unnecessary\nexport function OAIBuildJsonModeParams<T extends OpenAI.ChatCompletionCreateParams>(\n definition: ParseParams,\n params: T\n): JsonModeParamsReturnType<T> {\n return {\n ...params,\n response_format: { type: 'json_object' },\n messages: [\n {\n role: 'system',\n content: `\n Given a user prompt, you will return fully valid JSON based on the following description and schema.\n You will return no other prose. You will take into account any descriptions or required parameters within the schema\n and return a valid and fully escaped JSON object that matches the schema and those instructions.\n\n description: ${definition.description}\n json schema: ${JSON.stringify(definition)}\n `,\n },\n ...params.messages,\n ],\n }\n}\n\n// TODO: deprecate some of these because they don't supports streaming and are unnecessary\nexport function OAIBuildJsonSchemaParams<T extends OpenAI.ChatCompletionCreateParams>(\n definition: ParseParams,\n params: T\n): JsonSchemaParamsReturnType<T> {\n return {\n ...params,\n response_format: {\n type: 'json_object',\n schema: omit(['name', 'description'], definition),\n },\n messages: [\n {\n role: 'system',\n content: `\n Given a user prompt, you will return fully valid JSON based on the following description.\n You will return no other prose. 
You will take into account any descriptions or required parameters within the schema\n and return a valid and fully escaped JSON object that matches the schema and those instructions.\n\n description: ${definition.description}\n `,\n },\n ...params.messages,\n ],\n }\n}\n","import { MODE } from '@/constants'\nimport {\n OAIBuildFunctionParams,\n OAIBuildJsonModeParams,\n OAIBuildJsonSchemaParams,\n OAIBuildMessageBasedParams,\n OAIBuildThinkingMessageBasedParams,\n OAIBuildToolFunctionParams,\n} from './oai/params.ts'\nimport OpenAI from 'openai'\nimport * as z from 'zod'\nimport { Mode, ModeParamsReturnType, ResponseModel } from '@/types'\n\nexport function withResponseModel<\n T extends z.ZodType,\n M extends Mode,\n P extends OpenAI.ChatCompletionCreateParams,\n>({\n response_model: { name, schema, description = '' },\n mode,\n params,\n}: {\n response_model: ResponseModel<T>\n mode: M\n params: P\n}): ModeParamsReturnType<P, M> {\n const safeName = name.replace(/[^a-zA-Z0-9]/g, '_').replace(/\\s/g, '_')\n // TODO: Align schema creation with the zod registry paradigm\n // We may be able to get a schema that is closer to what we get from Pydantic with\n // Examples and titles. We may also be able to have it align perfectly with OAI Structured\n // Outputs, for those who want that use case.\n const jsonSchema = z.toJSONSchema(schema)\n\n /**\n * Remove $schema field for compatibility OAI JSON Schema\n * We delete it in place no copy is made or is necessary\n */\n delete jsonSchema.$schema\n const definition = {\n name: safeName,\n description,\n ...jsonSchema,\n }\n // TODO: remove the deprecated `functions` and use tools instead\n // This can be simplified down to two modes, tools and json,\n // TOOLS are meant to be parameters that get passed to a function, with the result being passed back\n // to the model before before the turn is complete\n // JSON is meant to be json data that is passed back to the user by having only two modes\n // We can make it much easier to maintain by using an adapter pattern for the different model providers\n // input interface -> model provider adapter -> transformer -> output interface\n if (mode === MODE.FUNCTIONS) {\n return OAIBuildFunctionParams<P>(definition, params) as ModeParamsReturnType<P, M>\n }\n\n if (mode === MODE.TOOLS) {\n return OAIBuildToolFunctionParams<P>(definition, params) as ModeParamsReturnType<P, M>\n }\n\n if (mode === MODE.JSON) {\n return OAIBuildJsonModeParams<P>(definition, params) as ModeParamsReturnType<P, M>\n }\n\n if (mode === MODE.JSON_SCHEMA) {\n return OAIBuildJsonSchemaParams<P>(definition, params) as ModeParamsReturnType<P, M>\n }\n\n if (mode === MODE.MD_JSON) {\n return OAIBuildMessageBasedParams<P>(definition, params) as ModeParamsReturnType<P, M>\n }\n\n if (mode === MODE.THINKING_MD_JSON) {\n return OAIBuildThinkingMessageBasedParams<P>(definition, params) as ModeParamsReturnType<P, M>\n }\n\n return OAIBuildMessageBasedParams<P>(definition, params) as ModeParamsReturnType<P, M>\n}\n","import OpenAI from 'openai'\nimport { z } from 'zod'\nimport { omit } from '@/lib'\nimport { thinkingJsonParser, withResponseModel } from '@/stream'\nimport { Mode } from '@/types'\nimport { MODE as ZMODE } from '@/constants'\n\nexport const MODE: typeof ZMODE = ZMODE\n\nexport const MODE_TO_RESPONSE_PARSER = {\n [MODE.THINKING_MD_JSON]: thinkingJsonParser,\n}\n\nexport const PROVIDERS = {\n OAI: 'OAI',\n ANYSCALE: 'ANYSCALE',\n TOGETHER: 'TOGETHER',\n ANTHROPIC: 'ANTHROPIC',\n GROQ: 'GROQ',\n OTHER: 'OTHER',\n} as const\n\nexport 
type Provider = keyof typeof PROVIDERS\n\nexport const PROVIDER_SUPPORTED_MODES: {\n [key in Provider]: Mode[]\n} = {\n [PROVIDERS.OTHER]: [MODE.FUNCTIONS, MODE.TOOLS, MODE.JSON, MODE.JSON_SCHEMA, MODE.MD_JSON],\n [PROVIDERS.OAI]: [MODE.FUNCTIONS, MODE.TOOLS, MODE.JSON, MODE.MD_JSON],\n [PROVIDERS.ANYSCALE]: [MODE.TOOLS, MODE.JSON, MODE.JSON_SCHEMA, MODE.MD_JSON],\n [PROVIDERS.TOGETHER]: [MODE.TOOLS, MODE.JSON, MODE.JSON_SCHEMA, MODE.MD_JSON],\n [PROVIDERS.ANTHROPIC]: [MODE.MD_JSON, MODE.TOOLS],\n [PROVIDERS.GROQ]: [MODE.TOOLS, MODE.FUNCTIONS, MODE.MD_JSON],\n} as const\n\nexport const NON_OAI_PROVIDER_URLS = {\n [PROVIDERS.ANYSCALE]: 'api.endpoints.anyscale',\n [PROVIDERS.TOGETHER]: 'api.together.xyz',\n [PROVIDERS.OAI]: 'api.openai.com',\n [PROVIDERS.ANTHROPIC]: 'api.anthropic.com',\n [PROVIDERS.GROQ]: 'api.groq.com',\n} as const\n\nexport const PROVIDER_PARAMS_TRANSFORMERS = {\n [PROVIDERS.GROQ]: {\n [MODE.TOOLS]: function groqToolsParamsTransformer<\n T extends z.ZodType,\n P extends OpenAI.ChatCompletionCreateParams,\n >(params: ReturnType<typeof withResponseModel<T, 'TOOLS', P>>) {\n if (\n params.tools &&\n params.tools.some((tool: OpenAI.ChatCompletionTool) => tool) &&\n params.stream\n ) {\n console.warn('Streaming may not be supported when using tools in Groq, try MD_JSON instead')\n return params\n }\n return params\n },\n },\n [PROVIDERS.ANYSCALE]: {\n [MODE.JSON_SCHEMA]: function removeAdditionalPropertiesKeyJSONSchema<\n T extends z.ZodType,\n P extends OpenAI.ChatCompletionCreateParams,\n >(params: ReturnType<typeof withResponseModel<T, 'JSON_SCHEMA', P>>) {\n // @ts-expect-error - these types will get simplified and fixed later\n if (params.response_format && 'additionalProperties' in params.response_format.schema) {\n return {\n ...params,\n response_format: {\n ...params.response_format,\n schema: omit(['additionalProperties'], params.response_format.schema),\n },\n }\n }\n return params\n },\n [MODE.TOOLS]: function removeAdditionalPropertiesKeyTools<\n T extends z.ZodType,\n P extends OpenAI.ChatCompletionCreateParams,\n >(params: ReturnType<typeof withResponseModel<T, 'TOOLS', P>>) {\n if (\n params.tools &&\n params.tools.some((tool: OpenAI.ChatCompletionTool) => tool.function?.parameters)\n ) {\n return {\n ...params,\n tools: params.tools.map((tool: OpenAI.ChatCompletionTool) => {\n if (tool.function?.parameters) {\n return {\n ...tool,\n function: {\n ...tool.function,\n parameters: omit(['additionalProperties'], tool.function.parameters),\n },\n }\n }\n return tool\n }),\n }\n }\n return params\n },\n },\n [PROVIDERS.OAI]: {},\n [PROVIDERS.TOGETHER]: {},\n [PROVIDERS.ANTHROPIC]: {},\n [PROVIDERS.OTHER]: {},\n} as const\n\nexport const PROVIDER_SUPPORTED_MODES_BY_MODEL = {\n [PROVIDERS.OTHER]: {\n [MODE.FUNCTIONS]: ['*'],\n [MODE.TOOLS]: ['*'],\n [MODE.JSON]: ['*'],\n [MODE.