evalz
Version:
Model-graded evals with TypeScript
1 line • 30.2 kB
Sources (recovered from the source map)
// ../src/evaluators/index.ts
import { EvaluationResponse, Evaluator, ExecuteEvalParams, ResultsType } from "@/types"
import createInstructor from "@instructor-ai/instructor"
import OpenAI from "openai"
import z from "zod"

import { CUSTOM_EVALUATOR_IDENTITY, RESULTS_TYPE_PROMPT } from "@/constants/prompts"

const scoringSchema = z.object({
  score: z.number()
})

export function createEvaluator<T extends ResultsType>({
  resultsType = "score" as T,
  evaluationDescription,
  model,
  messages,
  client
}: {
  resultsType?: T
  evaluationDescription: string
  model?: OpenAI.Model["id"]
  messages?: OpenAI.ChatCompletionMessageParam[]
  client: OpenAI
}): Evaluator<T> {
  if (!evaluationDescription || typeof evaluationDescription !== "string") {
    throw new Error("Evaluation description was not provided.")
  }

  const instructorClient = createInstructor<OpenAI>({
    client,
    mode: "TOOLS"
  })

  const execute = async ({ data }: ExecuteEvalParams): Promise<EvaluationResponse<T>> => {
    const evaluationResults = await Promise.all(
      data.map(async item => {
        const { prompt, completion, expectedCompletion } = item

        // Ask the model for a structured { score } object, retrying up to 3 times.
        const response = await instructorClient.chat.completions.create({
          max_retries: 3,
          model: model ?? "gpt-4-turbo",
          response_model: {
            schema: scoringSchema,
            name: "Scoring"
          },
          messages: [
            {
              role: "system",
              content: CUSTOM_EVALUATOR_IDENTITY
            },
            {
              role: "system",
              content: RESULTS_TYPE_PROMPT[resultsType]
            },
            {
              role: "system",
              content: evaluationDescription
            },
            ...(messages ?? []),
            {
              role: "system",
              content: `prompt: ${prompt} \n completion: ${completion}\n ${expectedCompletion?.length ? `expectedCompletion: ${expectedCompletion}\n` : " "}Please provide your score now:`
            }
          ]
        })

        return {
          score: response["score"],
          item
        }
      })
    )

    let resultObject

    if (resultsType === "score") {
      const avgScore =
        evaluationResults.reduce((sum, { score = 0 }) => sum + score, 0) / evaluationResults.length

      resultObject = {
        results: evaluationResults,
        scoreResults: {
          value: avgScore
        }
      }
    }

    if (resultsType === "binary") {
      const binaryResults = evaluationResults.reduce(
        (acc, { score }) => {
          // Binary scores are 0 or 1: count anything >= 0.5 as true, else false.
          if (score >= 0.5) {
            acc.trueCount++
          } else {
            acc.falseCount++
          }
          return acc
        },
        { trueCount: 0, falseCount: 0 }
      )

      resultObject = {
        results: evaluationResults,
        binaryResults
      }
    }

    if (!resultObject) throw new Error("No result object was created")

    return resultObject as unknown as EvaluationResponse<T>
  }

  execute.evalType = "model-graded" as const

  return execute
}
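// --- Usage sketch (illustrative, not from the package docs): a model-graded
// relevance evaluator built with createEvaluator. The returned function is
// called with { data }; with resultsType "score" the per-item scores are
// averaged into scoreResults.value. The root import path, the description,
// and the sample data are assumptions; OPENAI_API_KEY must be set.
import OpenAI from "openai"
import { createEvaluator } from "evalz"

const oai = new OpenAI({ apiKey: process.env["OPENAI_API_KEY"] })

const relevanceEval = createEvaluator({
  client: oai,
  model: "gpt-4-turbo",
  resultsType: "score",
  evaluationDescription:
    "Rate how well the completion answers the prompt, from 0 (irrelevant) to 1 (fully relevant)."
})

const result = await relevanceEval({
  data: [
    {
      prompt: "What is the capital of France?",
      completion: "Paris is the capital of France.",
      expectedCompletion: "Paris"
    }
  ]
})

console.log(result.scoreResults.value) // average 0..1 score across items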
// ../src/constants/prompts.ts
import { ResultsType } from "@/types"

export const CUSTOM_EVALUATOR_IDENTITY =
  "You are an AI evaluator tasked with scoring a language model's responses. You'll be presented with a 'prompt:' and 'completion:' pair (and optionally an 'expectedCompletion:') and should evaluate based on the criteria provided in the subsequent system prompts. Provide only a numerical score in the range defined, not a descriptive response and no other prose."

export const RESPONSE_TYPE_EVALUATOR_SCORE =
  "Your task is to provide a numerical score ranging from 0 to 1 based on the criteria in the subsequent system prompts. The score should precisely reflect the performance of the language model's response. Do not provide any text explanation or feedback, only the numerical score."

export const RESPONSE_TYPE_EVALUATOR_BINARY =
  "Your task is to provide a binary score of either 0 or 1 based on the criteria in the subsequent system prompts. This should precisely reflect the language model's performance. Do not provide any text explanation or feedback, only a singular digit: 1 or 0."

export const RESULTS_TYPE_PROMPT: Record<ResultsType, string> = {
  score: RESPONSE_TYPE_EVALUATOR_SCORE,
  binary: RESPONSE_TYPE_EVALUATOR_BINARY
}
// ../src/evaluators/weighted.ts
import { AccuracyEvaluator, EvaluationResponse, Evaluator, ExecuteEvalParams } from "@/types"

/**
 * @name createWeightedEvaluator
 * @description
 * Create a weighted evaluator that combines the results of multiple evaluators
 * @param evaluators - A record of evaluators to combine
 * @param weights - A record of weights for each evaluator
 * @returns A weighted evaluator
 */
export function createWeightedEvaluator({
  evaluators,
  weights
}: {
  evaluators: Record<string, Evaluator<"score"> | AccuracyEvaluator>
  weights: Record<string, number>
}): Evaluator<"score"> {
  const totalWeight = Object.values(weights).reduce((sum, weight) => sum + weight, 0)
  // Allow for floating-point error when validating that the weights sum to 1
  // (e.g. 0.1 + 0.2 + 0.7 does not compare strictly equal to 1).
  if (Math.abs(totalWeight - 1) > 1e-9) {
    throw new Error("The sum of weights must be 1")
  }

  if (
    Object.keys(weights).length !== Object.keys(evaluators).length ||
    !Object.keys(weights).every(key => key in evaluators)
  ) {
    throw new Error("Each evaluator must have a corresponding weight and vice versa.")
  }

  const execute = async ({ data }: ExecuteEvalParams): Promise<EvaluationResponse<"score">> => {
    const evaluationResults = await Promise.all(
      data.map(async item => {
        const {
          prompt = "",
          completion,
          expectedCompletion = "",
          contexts = [],
          groundTruth = ""
        } = item

        const evaluatorResults = await Promise.all(
          Object.keys(evaluators).map(async key => {
            const evaluator = evaluators[key]

            const isAccuracyEvaluator = evaluator.evalType === "accuracy"
            const isModelGradedEvaluator = evaluator.evalType === "model-graded"
            const isContextEvaluator = evaluator.evalType?.startsWith("context-")

            if (isAccuracyEvaluator) {
              console.log(`Evaluating ${key} with accuracy`)
            } else if (isModelGradedEvaluator) {
              console.log(`Evaluating ${key} with model-graded`)
            } else if (isContextEvaluator) {
              console.log(`Evaluating ${key} with ${evaluator.evalType}`)
            }

            try {
              const result = isAccuracyEvaluator
                ? await (evaluator as AccuracyEvaluator)({
                    data: [{ completion, expectedCompletion }]
                  })
                : await (evaluator as Evaluator<"score">)({
                    data: [
                      {
                        prompt,
                        completion,
                        expectedCompletion,
                        contexts,
                        groundTruth
                      }
                    ]
                  })

              return result?.scoreResults?.value !== undefined
                ? {
                    score: result?.scoreResults?.value,
                    evaluator: key,
                    evaluatorType: evaluator.evalType
                  }
                : undefined
            } catch (error) {
              console.error(`Error evaluating ${key}:`, error)
              return undefined
            }
          })
        )

        const validResults = evaluatorResults.filter(
          (e): e is NonNullable<typeof e> => e !== undefined
        )

        if (validResults.length === 0) {
          console.warn("No valid results for", item)
          return {
            score: NaN,
            scores: [],
            item
          }
        }

        // Match each weight to its evaluator's result by key rather than by
        // array position: failed evaluators are filtered out above, so the
        // positions in validResults need not line up with the weight keys.
        const weightedScore = Object.keys(weights).reduce(
          (sum, key) =>
            sum + weights[key] * (validResults.find(r => r.evaluator === key)?.score ?? 0),
          0
        )

        return {
          score: weightedScore,
          scores: validResults,
          item
        }
      })
    )

    const validResults = evaluationResults.filter(
      (e): e is NonNullable<typeof e> => !isNaN(e.score)
    )

    const weightedScore =
      validResults.length > 0
        ? validResults.reduce((sum, { score = 0 }) => sum + score, 0) / validResults.length
        : 0

    const individualAvgScores = Object.keys(evaluators).reduce(
      (acc, key) => {
        const scores = validResults.map(vr => vr.scores.find(s => s.evaluator === key)?.score ?? 0)
        acc[key] =
          scores.length > 0 ? scores.reduce((sum, score) => sum + score, 0) / scores.length : 0

        return acc
      },
      {} as Record<string, number>
    )

    return {
      results: validResults,
      scoreResults: {
        value: weightedScore,
        individual: individualAvgScores
      }
    }
  }

  execute.evalType = "weighted" as const
  return execute
}
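// --- Usage sketch (illustrative): combine a model-graded evaluator with the
// accuracy evaluator, weighted 60/40. The weights must cover exactly the same
// keys as the evaluators and sum to 1. The root import path, names, and data
// are assumptions; OPENAI_API_KEY must be set for both evaluators.
import OpenAI from "openai"
import { createAccuracyEvaluator, createEvaluator, createWeightedEvaluator } from "evalz"

const client = new OpenAI({ apiKey: process.env["OPENAI_API_KEY"] })

const weightedEval = createWeightedEvaluator({
  evaluators: {
    relevance: createEvaluator({
      client,
      resultsType: "score",
      evaluationDescription: "Score how relevant the completion is to the prompt, 0 to 1."
    }),
    accuracy: createAccuracyEvaluator({})
  },
  weights: { relevance: 0.6, accuracy: 0.4 }
})

const weighted = await weightedEval({
  data: [
    {
      prompt: "What is the capital of France?",
      completion: "Paris is the capital of France.",
      expectedCompletion: "Paris"
    }
  ]
})

console.log(weighted.scoreResults.value)      // combined weighted average
console.log(weighted.scoreResults.individual) // per-evaluator averages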
// ../src/types/index.ts
import { z } from "zod"

export const BaseEvaluationDataItemSchema = z.object({
  prompt: z.string().optional(),
  completion: z.string(),
  expectedCompletion: z.string().optional(),
  contexts: z.array(z.string()).optional(),
  groundTruth: z.string().optional()
})

export const EvaluationDataItemSchema = BaseEvaluationDataItemSchema

export const EvaluationDataItemResultSchema = z.object({
  score: z.number(),
  scores: z
    .array(
      z.object({
        score: z.number(),
        evaluator: z.string(),
        evaluatorType: z.string()
      })
    )
    .optional(),
  item: EvaluationDataItemSchema
})

export type ResultsType = "score" | "binary"

export type BinaryResults = {
  trueCount: number
  falseCount: number
}

export type AvgScoreResults = {
  value: number
  individual?: Record<string, number>
}

export type EvaluationDataItem = z.infer<typeof EvaluationDataItemSchema>
export type EvaluationDataItemResult = z.infer<typeof EvaluationDataItemResultSchema>

export type EvaluationResponse<T extends ResultsType> = {
  results: EvaluationDataItemResult[]
} & (T extends "score" ? { scoreResults: AvgScoreResults } : { binaryResults: BinaryResults })

export type ExecuteEvalParams = { data: EvaluationDataItem[] }

interface EvalFunction extends Function {
  evalType: "model-graded" | "accuracy" | `context-${ContextEvaluatorType}` | "weighted"
}

export type _Evaluator<T extends ResultsType> = ({
  data
}: ExecuteEvalParams) => Promise<EvaluationResponse<T>>

export interface Evaluator<T extends ResultsType> extends _Evaluator<T>, EvalFunction {}

export type ContextEvaluatorType = "entities-recall" | "precision" | "recall" | "relevance"

export type ContextEvaluator = Evaluator<"score">

export type AccuracyEvaluator = Evaluator<"score">

// ../src/evaluators/accuracy.ts
import { EvaluationResponse, Evaluator } from "@/types"
import { distance } from "fastest-levenshtein"
import OpenAI from "openai"

import { cosineSimilarity } from "@/lib/cosine"

export function createAccuracyEvaluator({
  model,
  weights = { factual: 0.5, semantic: 0.5 }
}: {
  model?: OpenAI.Embeddings.EmbeddingCreateParams["model"]
  weights?: { factual: number; semantic: number }
}): Evaluator<"score"> {
  const execute = async ({
    data
  }: {
    data: { completion: string; expectedCompletion?: string }[]
  }): Promise<EvaluationResponse<"score">> => {
    const openai = new OpenAI({ apiKey: process.env["OPENAI_API_KEY"] })

    const evaluationResults = await Promise.all(
      data.map(async item => {
        const { completion, expectedCompletion } = item

        if (!completion || !expectedCompletion) {
          console.warn("Completion or expected completion is missing.")
          return undefined
        }

        try {
          // Normalized Levenshtein similarity: 1 means the strings are identical.
          const factualDistance = distance(completion, expectedCompletion)
          const factualScore =
            1 - factualDistance / Math.max(completion.length, expectedCompletion.length)

          const [completionEmbedding, expectedEmbedding] = await Promise.all([
            openai.embeddings.create({
              input: [completion],
              model: model ?? "text-embedding-ada-002"
            }),
            openai.embeddings.create({
              input: [expectedCompletion],
              model: model ?? "text-embedding-ada-002"
            })
          ])

          const semanticScore = cosineSimilarity(
            completionEmbedding.data[0].embedding,
            expectedEmbedding.data[0].embedding
          )

          // Blend the string-distance and embedding signals per the configured weights.
          const score = weights.factual * factualScore + weights.semantic * semanticScore

          return {
            item: {
              completion,
              expectedCompletion
            },
            score
          }
        } catch (error) {
          console.error("Error in accuracy evaluation:", error)
          return undefined
        }
      })
    )

    const validResults = evaluationResults.filter(
      (e): e is NonNullable<typeof e> => e !== undefined
    )

    const avgScore =
      validResults.length > 0
        ? validResults.reduce((sum, { score }) => sum + score, 0) / validResults.length
        : 0

    return {
      results: validResults,
      scoreResults: {
        value: avgScore
      }
    }
  }

  execute.evalType = "accuracy" as const
  return execute
}
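// --- Usage sketch (illustrative): weight string distance 40% and embedding
// similarity 60%. The root import path and the sample data are assumptions;
// OPENAI_API_KEY must be set for the embedding calls.
import { createAccuracyEvaluator } from "evalz"

const accuracyEval = createAccuracyEvaluator({
  model: "text-embedding-3-small",
  weights: { factual: 0.4, semantic: 0.6 }
})

const accuracy = await accuracyEval({
  data: [{ completion: "Paris is the capital of France.", expectedCompletion: "Paris" }]
})

console.log(accuracy.scoreResults.value) // 0..1, higher means closer to expected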
// ../src/lib/cosine.ts
/**
 * Calculate the dot product of two vectors
 * @param a - First vector
 * @param b - Second vector
 * @returns The dot product
 */
function dot(a: number[], b: number[]): number {
  return a.reduce((sum, val, index) => sum + val * b[index], 0)
}

/**
 * Calculate the cosine similarity between two vectors
 * @param a - First vector
 * @param b - Second vector
 * @returns The cosine similarity (-1 to 1), or 0 if either vector has zero magnitude
 */
export function cosineSimilarity(a: number[], b: number[]): number {
  const dotProduct = dot(a, b)
  const magnitudeA = Math.sqrt(dot(a, a))
  const magnitudeB = Math.sqrt(dot(b, b))
  return magnitudeA && magnitudeB ? dotProduct / (magnitudeA * magnitudeB) : 0
}
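// --- Worked check (illustrative) using the cosineSimilarity defined above:
// dot([1,2,2],[2,4,4]) = 2 + 8 + 8 = 18, |a| = 3, |b| = 6, so the similarity
// is 18 / (3 * 6) = 1 for parallel vectors, while orthogonal vectors score 0.
console.log(cosineSimilarity([1, 2, 2], [2, 4, 4])) // 1
console.log(cosineSimilarity([1, 0], [0, 1]))       // 0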
// ../src/evaluators/context.ts
import {
  ContextEvaluator,
  ContextEvaluatorType,
  EvaluationResponse,
  ExecuteEvalParams
} from "@/types"
import { distance as levenshteinDistance } from "fastest-levenshtein"
import OpenAI from "openai"

import { cosineSimilarity } from "@/lib/cosine"

// Naive entity extraction: any capitalized word counts as an entity.
function extractEntities(text: string): string[] {
  return text.match(/\b[A-Z][a-z]*\b/g) || []
}

export function createContextEvaluator({
  type,
  model = "text-embedding-3-small"
}: {
  type: ContextEvaluatorType
  model?: OpenAI.Embeddings.EmbeddingCreateParams["model"]
}): ContextEvaluator {
  const execute = async ({ data }: ExecuteEvalParams): Promise<EvaluationResponse<"score">> => {
    const openai = new OpenAI({ apiKey: process.env["OPENAI_API_KEY"] })

    const evaluationResults = await Promise.all(
      data.map(async item => {
        const { prompt, contexts = [], groundTruth = "", completion } = item

        let score = 0

        switch (type) {
          case "entities-recall": {
            if (!completion) {
              throw new Error("Completion is required for entities-recall evaluation.")
            }

            // Fraction of the completion's entities that appear in the contexts,
            // deduplicated so repeated mentions cannot push the score above 1.
            const completionEntities = new Set(extractEntities(completion))
            const contextEntities = new Set(contexts.flatMap(context => extractEntities(context)))
            const intersection = [...completionEntities].filter(entity =>
              contextEntities.has(entity)
            )
            score =
              completionEntities.size > 0 ? intersection.length / completionEntities.size : 0
            break
          }

          case "precision": {
            if (!completion) {
              throw new Error("Completion is required for precision evaluation.")
            }
            const completionEmbeddingInput = prompt ? `${prompt} ${completion}` : completion
            const completionEmbedding = await openai.embeddings.create({
              input: [completionEmbeddingInput],
              model
            })
            const groundTruthEmbedding = groundTruth
              ? await openai.embeddings.create({
                  input: [groundTruth],
                  model
                })
              : null
            const contextEmbeddings = await Promise.all(
              contexts.map(context => openai.embeddings.create({ input: [context], model }))
            )

            // Precision@k: a context chunk counts as a true positive when it is
            // similar enough to the completion or to the ground truth.
            let truePositives = 0
            let falsePositives = 0
            const precisionAtK = contextEmbeddings.map(embeddingResult => {
              const completionSimilarity = cosineSimilarity(
                completionEmbedding.data[0].embedding,
                embeddingResult.data[0].embedding
              )
              const groundTruthSimilarity = groundTruthEmbedding
                ? cosineSimilarity(
                    groundTruthEmbedding.data[0].embedding,
                    embeddingResult.data[0].embedding
                  )
                : 0

              const maxSimilarity = Math.max(completionSimilarity, groundTruthSimilarity)

              if (maxSimilarity > 0.5) {
                truePositives += 1
              } else {
                falsePositives += 1
              }

              return truePositives / (truePositives + falsePositives)
            })

            // Guard against an empty contexts array.
            score =
              contexts.length > 0
                ? precisionAtK.reduce((sum, precision) => sum + precision, 0) / contexts.length
                : 0
            break
          }

          case "recall": {
            if (!completion) {
              throw new Error("Completion is required for recall evaluation.")
            }
            const completionSentences = completion
              .split(".")
              .map(sentence => sentence.trim())
              .filter(Boolean)
            const contextSentences = contexts.flatMap(context =>
              context.split(".").map(sentence => sentence.trim())
            )

            // A completion sentence is "supported" when some context sentence is
            // within 50% of the longer string's length in Levenshtein distance.
            const sentenceIntersection = completionSentences.filter(sentence =>
              contextSentences.some(contextSentence => {
                const levDistance = levenshteinDistance(sentence, contextSentence)
                return levDistance < Math.max(sentence.length, contextSentence.length) * 0.5
              })
            )

            score =
              completionSentences.length > 0
                ? sentenceIntersection.length / completionSentences.length
                : 0
            break
          }

          case "relevance": {
            if (!completion) {
              throw new Error("Completion is required for relevance evaluation.")
            }
            const embeddingInput = prompt ? `${prompt} ${completion}` : completion
            const completionEmbedding = await openai.embeddings.create({
              input: [embeddingInput],
              model
            })
            const contextEmbeddings = await Promise.all(
              contexts.map(context => openai.embeddings.create({ input: [context], model }))
            )
            const relevanceScores = contextEmbeddings.map(embedding =>
              cosineSimilarity(completionEmbedding.data[0].embedding, embedding.data[0].embedding)
            )
            // Guard against an empty contexts array.
            score =
              relevanceScores.length > 0
                ? relevanceScores.reduce((sum, s) => sum + s, 0) / relevanceScores.length
                : 0
            break
          }

          default:
            throw new Error(`Unsupported evaluation type: ${type}`)
        }

        return {
          item,
          score
        }
      })
    )

    const avgScore =
      evaluationResults.length > 0
        ? evaluationResults.reduce((sum, { score }) => sum + score, 0) / evaluationResults.length
        : 0

    return {
      results: evaluationResults,
      scoreResults: { value: avgScore }
    }
  }

  execute.evalType = `context-${type}` as const
  return execute
}
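// --- Usage sketch (illustrative): score how well a completion is grounded in
// retrieved context chunks. "precision" and "relevance" call the embeddings
// API (OPENAI_API_KEY required); "recall" and "entities-recall" use string
// matching only. The root import path and the sample data are assumptions.
import { createContextEvaluator } from "evalz"

const relevance = createContextEvaluator({ type: "relevance" })

const contextResult = await relevance({
  data: [
    {
      prompt: "Where is the Eiffel Tower?",
      completion: "The Eiffel Tower is in Paris.",
      contexts: [
        "The Eiffel Tower is a wrought-iron tower in Paris, France.",
        "It was completed in 1889."
      ]
    }
  ]
})

console.log(contextResult.scoreResults.value) // mean cosine similarity to the contexts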