package.json
125 lines (125 loc) · 20.5 KB
{
"name": "promptpex",
"version": "0.0.18",
"author": "Microsoft",
"license": "MIT",
"description": "PromptPex is a test generator for prompts, that allows evaluating and comparing AI prompts across different models and configurations.",
"dependencies": {
"genaiscript": "^2.5.1"
},
"devDependencies": {
"@types/node": "^25.2.0",
"openai": "^6.17.0",
"prettier": "^3.8.1",
"zx": "^8.8.5"
},
"keywords": [
"genai",
"prompt",
"test",
"generator",
"evaluation",
"comparison"
],
"engines": {
"node": ">=22.15.0"
},
"bin": "./bin.mjs",
"files": [
"README.md",
"bin.mjs",
"src/genaisrc/promptpex.genai.mts",
"src/genaisrc/src/**",
"src/prompts/**"
],
"scripts": {
"all-samples": "zx samples/run-samples.zx.mjs",
"all-samples-gen": "zx samples/run-samples-gen.zx.mjs",
"all-samples-run": "zx samples/run-samples-run.zx.mjs",
"all-samples-eval": "zx samples/run-samples-eval.zx.mjs",
"postinstall": "genaiscript scripts fix",
"postupdate": "genaiscript scripts fix",
"upgrade": "npx --yes npm-check-updates -u && npm install",
"install:force": "rm package-lock.json && npm run install && cd docs && npm run install:force",
"genaiscript": "genaiscript",
"configure": "genaiscript configure action promptpex",
"az:login": "az login --scope api://trapi/.default --use-device-code",
"genai": "genaiscript run",
"gcm": "genaiscript run gcm -m github:openai/gpt-4.1 --no-run-trace --no-output-trace",
"gcm:ollama": "genaiscript run gcm -m ollama:gemma3:27b --no-run-trace --no-output-trace",
"gcm:azure": "genaiscript run gcm -m azure:gpt-4o_2024-11-20 --no-run-trace --no-output-trace",
"prd": "genaiscript run prd -prd -m github:openai/gpt-4.1 --no-run-trace --no-output-trace",
"serve": "genaiscript serve",
"serve:github": "genaiscript serve -p github --env .github.env",
"typecheck": "genaiscript scripts compile",
"build": "genaiscript scripts compile",
"build:schemas": "cp src/genaisrc/src/prompty-frontmatter.json docs/public/schemas/prompty-frontmatter.json",
"build:docs": "npm run build:schemas && cd docs && npm run build",
"promptpex": "genaiscript run promptpex",
"promptpex:azure": "genaiscript run promptpex --vars \"compliance=true\" --model \"azure:gpt-4o_2024-11-20\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18\" --vars cache=true --vars evalCache=true --vars testRunCache=true",
"dev": "genaiscript run dev",
"dev:fabric": "genaiscript run dev --vars \"fabric=v1.4.149\" --vars \"samplePrompts=1\"",
"lint": "prettier --write src/**/*.mts",
"test": "genaiscript run test --env .test.env --model github:openai/gpt-4.1-mini",
"docs": "npm run build:schemas && cd docs && npm run dev",
"promptpex:bare:azure": "genaiscript run promptpex \"samples/demo/bare.prompty\" --vars \"compliance=true\" --model \"azure:gpt-4o_2024-11-20\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18\" --vars cache=true --vars evalCache=true --vars testRunCache=true --vars out=evals",
"promptpex:joke:github": "genaiscript run promptpex \"samples/demo/joke.prompty\" --model \"github:openai/gpt-4o\" --vars \"groundtruthModel=github:openai/gpt-4.1\" --vars \"modelsUnderTest=github:openai/gpt-4.1-mini\" --vars cache=true --vars effort=min --vars evals=true --vars evalCache=true --vars testRunCache=true --vars out=evals --env .github.env",
"promptpex:joke:azure": "genaiscript run promptpex \"samples/demo/joke.prompty\" --model \"azure:gpt-4o_2024-11-20\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18\" --vars cache=true --vars evalCache=true --vars testRunCache=true --vars out=evals",
"promptpex:joke:azurefoundry": "genaiscript run promptpex \"samples/demo/joke.prompty\" --vars cache=true --vars evalCache=true --vars testRunCache=true --vars out=evals --vars storeCompletions=true",
"promptpex:demo:ollama": "genaiscript run promptpex \"samples/demo/demo.prompty\" --vars \"compliance=true\" --vars \"modelsUnderTest=ollama:llama3.2:1b;ollama:qwen2.5:3b\" --vars cache=true --vars evalCache=true --vars testRunCache=true --vars out=evals",
"promptpex:demo:github": "genaiscript run promptpex \"samples/demo/demo.prompty\" --env .github.env --vars \"compliance=true\" --model \"github:openai/gpt-4o\" --vars \"modelsUnderTest=github:openai/gpt-4o-mini;github:microsoft/Phi-4-mini-instruct\" --vars cache=true --vars evalCache=true --vars testRunCache=true",
"promptpex:demo:models": "genaiscript run promptpex \"samples/demo/demo.prompty\" --vars evalModel=github:openai/gpt-4o --vars \"groundtruthModel=github:openai/gpt-4.1\" --vars \"modelsUnderTest=github:openai/gpt-4.1-nano\" --vars cache=true --vars evalCache=true --vars testRunCache=true --vars out=evals --env .github.env --no-run-trace",
"promptpex:explainer:models": "genaiscript run promptpex \"samples/github-models/explainer.prompt.yml\" --vars evalModel=github:openai/gpt-4o --vars \"groundtruthModel=github:openai/gpt-4.1\" --vars \"modelsUnderTest=github:openai/gpt-4.1-nano\" --vars cache=true --vars evalCache=true --vars testRunCache=true --vars out=evals --env .github.env",
"promptpex:summarizer:models": "genaiscript run promptpex \"samples/github-models/summarizer.prompt.yml\" --vars evalModel=github:openai/gpt-4o --vars \"groundtruthModel=github:openai/gpt-4.1\" --vars \"modelsUnderTest=github:openai/gpt-4.1-nano\" --vars cache=true --vars evalCache=true --vars testRunCache=true --vars out=evals --env .github.env --no-run-trace",
"promptpex:demo:azure": "genaiscript run promptpex \"samples/demo/demo.prompty\" --vars \"compliance=true\" --model \"azure:gpt-4o_2024-11-20\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18\" --vars cache=true --vars evalCache=true --vars testRunCache=true",
"promptpex:demo:paper:azure": "genaiscript run paper \"samples/demo/demo.prompty\" --vars \"evals=true\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18;ollama:gemma2:9b\" --vars \"out=evals/paper-demo\"",
"promptpex:speech-tag:github": "genaiscript run paper \"samples/speech-tag/speech-tag.prompty\" --vars \"evals=true\" --vars \"modelsUnderTest=github:gpt-4o-mini;github:Phi-4-mini-instruct\"",
"promptpex:speech-tag:azure": "genaiscript run promptpex \"samples/speech-tag/speech-tag.prompty\" --vars \"evals=true\" --model \"azure:gpt-4o_2024-11-20\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18\" --vars out=evals",
"promptpex:speech-tag-multi:azure": "genaiscript run promptpex \"samples/speech-tag/speech-tag-multi.prompty\" --vars \"evals=true\" --model \"azure:gpt-4o_2024-11-20\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18\" --vars out=evals",
"promptpex:paper-newm-newp": "genaiscript run paper \"samples/speech-tag/speech-tag.prompty\" \"samples/text-to-p/text-to-p.prompty\" \"samples/openai-examples/elements.prompty\" \"samples/big-prompt-lib/art-prompt.prompty\" \"samples/prompt-guide/extract-names.prompty\" \"samples/text-classification/classify-input-text.prompty\" \"samples/big-prompt-lib/sentence-rewrite.prompty\" \"samples/azure-ai-studio/shakespearean-writing-assistant.prompty\" \"samples/big-prompt-lib/wine-expert.prompty\" \"samples/big-prompt-lib/text-to-emoji.prompty\" \"samples/big-prompt-lib/topic-breakdown.prompty\" \"samples/big-prompt-lib/shield-challenge.prompty\" \"samples/big-prompt-lib/survival.prompty\" \"samples/big-prompt-lib/email-responder.prompty\" \"samples/hugging-face/llm-as-judge.prompty\" --vars \"evals=true\" --vars \"modelsUnderTest=ollama:deepseek-r1:32b;azure:gpt-4o-mini_2024-07-18;ollama:qwen2.5:3b\" --vars \"out=evals/p-newm-newp-02-14\"",
"promptpex:paper-speech-tag": "genaiscript run paper \"samples/speech-tag/speech-tag.prompty\" --vars \"evals=true\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18;ollama:gemma2:9b;ollama:qwen2.5:3b;ollama:llama3.2:1b\" --vars \"out=evals/paper-speech-tag\"",
"promptpex:paper-speech-tag-m": "genaiscript run paper \"samples/speech-tag/speech-tag-multi.prompty\" --vars \"evals=true\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18\" --vars \"out=evals/paper-speech-tag-m\"",
"promptpex:paper-speech-tag-ex": "genaiscript run promptpex \"samples/speech-tag/speech-tag.prompty\" --vars \"evals=false\" --vars \"testExpansions=3\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18\" --vars \"out=evals/paper-speech-tag-ex\"",
"promptpex:paper-speech-tag-tplus": "genaiscript run paper \"samples/speech-tag/speech-tag.prompty\" --vars \"splitRules=true\" --vars \"maxRulesPerTestGeneration=5\" --vars \"testGenerations=1\" --vars \"evals=true\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18;ollama:gemma2:9b;ollama:qwen2.5:3b;ollama:llama3.2:1b\" --vars \"out=evals/paper-speech-tag-tplus\"",
"promptpex:paper-speech-tag-cache": "genaiscript run paper \"samples/speech-tag/speech-tag.prompty\" --vars \"evals=true\" --vars \"cache=true\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18;ollama:gemma2:9b;ollama:qwen2.5:3b;ollama:llama3.2:1b\" --vars \"out=evals/paper-speech-tag-tplus\"",
"promptpex:paper-speech-tag-1tpr-3rpt": "genaiscript run paper \"samples/speech-tag/speech-tag.prompty\" --vars \"evals=true\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18;ollama:gemma2:9b;ollama:qwen2.5:3b;ollama:llama3.2:1b\" --vars \"out=evals/paper-speech-tag-1tpr-3rpt\" --vars \"testsPerRule=1\" --vars \"runsPerTest=5\" ",
"promptpex:paper-speech-tag-4o": "genaiscript run paper \"samples/speech-tag/speech-tag.prompty\" --vars \"evals=true\" --vars \"modelsUnderTest=ollama:phi4;ollama:qwen2.5:3b\" --vars \"out=evals/paper-speech-tag-4o\"",
"promptpex:speech-tag-evaltest": "genaiscript run promptpex \"samples/speech-tag/speech-tag.prompty\" --vars \"evals=true\" --vars \"createEvalRuns=true\" --vars \"modelsUnderTest=openai:gpt-4.1-nano-2025-04-14;openai:gpt-4o-mini-2024-07-18\" --vars \"splitRules=false\" --vars \"maxRulesPerTestGeneration=20\" --vars \"testGenerations=1\" --vars \"cache=true\" --vars \"testExpansions=0\" --vars \"out=evals/paper-speech-tag-evaltest\"",
"promptpex:paper-classify-input-text": "genaiscript run paper \"samples/text-classification/classify-input-text.prompty\" --vars \"evals=true\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18;ollama:gemma2:9b;ollama:qwen2.5:3b;ollama:llama3.2:1b\" --vars \"out=evals/paper-classify-input-text\"",
"promptpex:paper-art-prompt": "genaiscript run paper \"samples/big-prompt-lib/art-prompt.prompty\" --vars \"evals=true\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18;ollama:gemma2:9b;ollama:qwen2.5:3b;ollama:llama3.2:1b\" --vars \"out=evals/paper-art-prompt\"",
"promptpex:paper": "genaiscript run paper \"samples/speech-tag/speech-tag.prompty\" \"samples/text-to-p/text-to-p.prompty\" \"samples/openai-examples/elements.prompty\" \"samples/big-prompt-lib/art-prompt.prompty\" \"samples/prompt-guide/extract-names.prompty\" \"samples/text-classification/classify-input-text.prompty\" \"samples/big-prompt-lib/sentence-rewrite.prompty\" \"samples/azure-ai-studio/shakespearean-writing-assistant.prompty\" --vars \"evals=true\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18;ollama:gemma2:9b;ollama:qwen2.5:3b;ollama:llama3.2:1b\" --vars \"out=evals/paper\"",
"promptpex:paper-m": "genaiscript run paper \"samples/speech-tag/speech-tag-multi.prompty\" \"samples/speech-tag/speech-tag.prompty\" \"samples/text-to-p/text-to-p.prompty\" \"samples/openai-examples/elements.prompty\" \"samples/big-prompt-lib/art-prompt.prompty\" \"samples/prompt-guide/extract-names.prompty\" \"samples/text-classification/classify-input-text.prompty\" \"samples/big-prompt-lib/sentence-rewrite.prompty\" \"samples/azure-ai-studio/shakespearean-writing-assistant.prompty\" --vars baselineTests=false --vars \"evals=true\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18;ollama:gemma2:9b;ollama:qwen2.5:3b;ollama:llama3.2:1b\" --vars \"out=evals/paper-m\"",
"promptpex:paper-tplus": "genaiscript run paper \"samples/speech-tag/speech-tag.prompty\" \"samples/text-to-p/text-to-p.prompty\" \"samples/openai-examples/elements.prompty\" \"samples/big-prompt-lib/art-prompt.prompty\" \"samples/prompt-guide/extract-names.prompty\" \"samples/text-classification/classify-input-text.prompty\" \"samples/big-prompt-lib/sentence-rewrite.prompty\" \"samples/azure-ai-studio/shakespearean-writing-assistant.prompty\" --vars \"splitRules=true\" --vars \"maxRulesPerTestGeneration=5\" --vars \"testGenerations=1\" --vars \"evals=true\" --vars \"modelsUnderTest=azure:gpt-4o-mini_2024-07-18;ollama:gemma2:9b;ollama:qwen2.5:3b;ollama:llama3.2:1b\" --vars \"out=evals/paper-tplus\"",
"promptpex:test-st-min": "genaiscript run promptpex \"samples/speech-tag/speech-tag.prompty\" --vars \"effort=min\" --vars \"groundtruthModel=ollama:llama3.3\" --vars \"evalModel=ollama:llama3.3;ollama:qwen2.5:3b\" --vars \"evals=true\" --vars \"compliance=true\" --vars \"baselineTests=true\" --vars \"evalModelGroundtruth=azure:gpt-4.1-mini_2025-04-14;ollama:llama3.3\" --vars \"modelsUnderTest=ollama:qwen2.5:3b;ollama:llama3.2:1b\" --vars \"out=evals/test-st-min\"",
"promptpex:test-st-def:ollama": "genaiscript run promptpex \"samples/speech-tag/speech-tag.prompty\" --vars \"effort=min\" --vars \"out=evals/test-st-def\" --env .env.ollama",
"promptpex:test-st-mingt": "genaiscript run promptpex \"samples/speech-tag/speech-tag.prompty\" --vars \"effort=min\" --vars \"groundtruthModel=azure:gpt-4.1-mini_2025-04-14\" --vars \"evalModel=ollama:llama3.3\" --vars \"evals=true\" --vars \"compliance=true\" --vars \"baselineTests=false\" --vars \"modelsUnderTest=ollama:llama3.3\" --vars \"out=evals/test-st-mingt\"",
"promptpex:test-st-mediumgt": "genaiscript run promptpex \"samples/speech-tag/speech-tag-multi.prompty\" --vars \"effort=medium\" --vars \"groundtruthModel=azure:gpt-4.1-mini_2025-04-14\" --vars \"evalModel=ollama:llama3.3\" --vars \"evals=true\" --vars \"compliance=true\" --vars \"modelsUnderTest=azure:gpt-4.1-mini_2025-04-14;ollama:llama3.3\" --vars \"out=evals/test-st-mediumgt\"",
"promptpex:test-st-min:ollama": "genaiscript run promptpex \"samples/speech-tag/speech-tag.prompty\" --vars \"effort=min\" --vars \"groundtruthModel=azure:gpt-4.1-mini_2025-04-14\" --vars \"evals=true\" --vars \"modelsUnderTest=ollama:qwen2.5:3b;ollama:llama3.2:1b;ollama:llama3.3\" --vars \"compliance=false\" --vars \"baselineTests=false\" --vars \"evalModelGroundtruth=azure:gpt-4.1-mini_2025-04-14;ollama:llama3.3\" --vars \"out=evals/test-st-min\" --env .env.ollama",
"promptpex:test-st-min-gen:ollama": "genaiscript run promptpex \"samples/speech-tag/speech-tag.prompty\" --vars \"effort=min\" --vars \"groundtruthModel=azure:gpt-4.1-mini_2025-04-14\" --vars \"evals=false\" --vars \"compliance=false\" --vars \"baselineTests=false\" --vars \"evalModelGroundtruth=azure:gpt-4.1-mini_2025-04-14;ollama:llama3.3\" --vars \"out=evals/test-st-min-gen\" --env .env.ollama",
"promptpex:test-st-min-run:ollama": "genaiscript run promptpex \"evals/test-st-min-gen/speech-tag/promptpex_context.json\" --vars \"evals=false\" --vars \"compliance=false\" --vars \"baselineTests=false\" --vars \"modelsUnderTest=ollama:qwen2.5:3b;ollama:llama3.2:1b;ollama:llama3.3\" --vars \"out=evals/test-st-min-run --env .env.ollama\"",
"promptpex:test-st-min-eval:ollama": "genaiscript run promptpex \"evals/test-st-min-run/speech-tag/promptpex_context.json\" --vars \"evals=true\" --vars \"compliance=true\" --vars \"baselineTests=false\" --vars \"evalModel=azure:gpt-4.1-mini_2025-04-14\" --vars \"out=evals/test-st-min-eval\" --env .env.ollama",
"promptpex:test-st-min-eval1": "genaiscript run promptpex \"evals/test-st-min-run/speech-tag/promptpex_context.json\" --vars \"evals=true\" --vars \"compliance=true\" --vars \"baselineTests=false\" --vars \"evalModel=azure:gpt-4.1-mini_2025-04-14;ollama:llama3.3\" --vars \"out=evals/test-st-min-eval\" --env .env.ollama",
"promptpex:test-st-min-runeval:ollama": "genaiscript run promptpex \"evals/test-st-min-gen/speech-tag/promptpex_context.json\" --vars \"evals=true\" --vars \"compliance=true\" --vars \"baselineTests=false\" --vars \"modelsUnderTest=ollama:qwen2.5:3b;ollama:llama3.2:1b\" --vars \"evalModel=ollama:llama3.3\" --vars \"out=evals/test-st-min-runeval\" --env .env.ollama",
"promptpex:test-st-med": "genaiscript run promptpex \"samples/speech-tag/speech-tag.prompty\" --vars \"effort=medium\" --vars \"evals=true\" --vars \"compliance=true\" --vars \"baselineTests=false\" --vars \"modelsUnderTest=ollama:llama3.2:1b\" --vars \"out=evals/test-st-med\"",
"promptpex:test-headline": "genaiscript run promptpex \"samples/demo/rate-headline.prompty\" --vars \"effort=min\" --vars \"evals=true\" --vars \"compliance=true\" --vars \"baselineTests=false\" --vars \"modelsUnderTest=ollama:llama3.2:1b\" --vars \"out=evals/test-headline\"",
"promptpex:test1": "genaiscript run promptpex \"samples/demo/rate-headline.prompty\" --vars \"cache=true\" --vars \"evals=false\" --vars \"effort=min\" --vars \"baselineTests=true\" --vars \"filterTestCount=3\" --vars \"modelsUnderTest=ollama:llama3.2:1b\" --vars \"out=evals/test-test1\"",
"promptpex:inline": "genaiscript run promptpex --vars \"prompt=rate this summary from 1 to 10\" --vars \"effort=min\" --vars \"evals=false\" --vars \"out=evals/test-inline\"",
"promptpex:test-expand": "genaiscript run promptpex --vars \"prompt=dummy\" --vars \"testExpansions=1\" --vars \"evals=false\" --vars \"out=evals/test-expand\" --vars \"loadContext=true\" --vars \"loadContextFile=evals/dev/test-inline.promptpex_context.json\"",
"promptpex:test-load": "genaiscript run promptpex \"samples/demo/rate-headline.prompty\" --vars \"cache=true\" --vars \"evals=true\" --vars \"effort=min\" --vars \"baselineTests=true\" --vars \"rateTests=true\" --vars \"filterTestCount=3\" --vars \"modelsUnderTest=ollama:llama3.2:1b\" --vars \"loadContext=true\" --vars \"loadContextFile=evals/dev/rate-headline.promptpex_context.json\" --vars \"testExpansions=1\" --vars \"out=evals/test-load\"",
"promptpex:test-experience": "genaiscript run promptpex \"samples/demo/rate-customer-experience.prompty\" --vars \"effort=min\" --vars \"evals=true\" --vars \"compliance=true\" --vars \"baselineTests=false\" --vars \"modelsUnderTest=ollama:llama3.2:1b\" --vars \"out=evals/test-experience\"",
"promptpex:doc-min": "genaiscript run promptpex \"samples/speech-tag/speech-tag.prompty\" --vars \"effort=min\" --vars \"evalModel=ollama:llama3.3\" --vars \"evals=true\" --vars \"modelsUnderTest=ollama:llama3.3\" --vars \"out=evals/test-doc-min\"",
"promptpex:doc-gentest": "genaiscript run promptpex \"samples/speech-tag/speech-tag.prompty\" --vars \"effort=min\" --vars \"evals=false\" --vars \"out=evals/test-doc-gentest\"",
"promptpex:doc-gentestgt": "genaiscript run promptpex \"samples/speech-tag/speech-tag.prompty\" --vars \"groundtruthModel=ollama:llama3.3\" --vars \"effort=min\" --vars \"evals=false\"--vars \"out=evals/test-doc-gentestgt\"",
"promptpex:doc-runevalgt": "genaiscript run promptpex \"evals/test-doc-gentestgt/speech-tag/promptpex_context.json\" --vars \"evals=true\" --vars \"evalModel=ollama:llama3.3\" --vars \"modelsUnderTest=ollama:llama3.3\" --vars \"out=evals/test-doc-runevalgt\"",
"promptpex:doc-ratetests": "genaiscript run promptpex \"evals/test-doc-gentestgt/speech-tag/promptpex_context.json\" --vars \"evals=false\" \"rateTests=true\" --vars \"filterTestCount=3\" --vars \"out=evals/test-doc-ratetests\"",
"docker:serve": "npm run docker:serve:build && npm run docker:serve:run",
"docker:serve:build": "docker build -f Dockerfile.serve -t promptpex-serve .",
"docker:serve:run": "docker run --rm -p 8003:8003 promptpex-serve",
"docker:serve:stop": "docker stop $(docker ps -q --filter ancestor=promptpex-serve) 2>/dev/null || true",
"docker:server:msr": "docker run --env-file .promptpex.env -p 8003:8003 ghcr.io/microsoft/promptpex:v0.0.18",
"ollama": "npm run ollama:stop && npm run ollama:start",
"ollama:start": "docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama -e OLLAMA_FLASH_ATTENTION=1 -e OLLAMA_KV_CACHE_TYPE=q8_0 ollama/ollama",
"ollama:stop": "docker stop ollama && docker rm ollama",
"start": "genaiscript run src/genaisrc/promptpex.genai.mts --github-workspace --no-run-trace --out-trace $GITHUB_STEP_SUMMARY"
}
}