export const DEFAULT_PROMPTS = `Your first prompt goes here
---
Next prompt goes here. You can substitute variables like this: {{var1}} {{var2}} {{var3}}
---
This is the next prompt.

These prompts are nunjucks templates, so you can use logic like this:
{% if var1 %}
  {{ var1 }}
{% endif %}
---
[
{"role": "system", "content": "This is another prompt. JSON is supported."},
{"role": "user", "content": "Using this format, you may construct multi-shot OpenAI prompts"}
{"role": "user", "content": "Variable substitution still works: {{ var3 }}"}
]
---
If you prefer, you can break prompts into multiple files (make sure to edit promptfooconfig.yaml accordingly)
`;
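
// The note above mentions splitting prompts into separate files. A minimal
// sketch of how promptfooconfig.yaml could then reference them (the file
// names here are hypothetical):
//
//   prompts:
//     - prompt1.txt
//     - prompt2.txt
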
export const DEFAULT_YAML_CONFIG = `# This configuration compares LLM output of 2 prompts x 2 GPT models across 3 test cases.
# Learn more: https://promptfoo.dev/docs/configuration/guide
description: 'My first eval'

prompts:
  - "Write a tweet about {{topic}}"
  - "Write a very concise, funny tweet about {{topic}}"

providers:
  - openai:gpt-3.5-turbo-0613
  - openai:gpt-4-0613

tests:
  - vars:
      topic: bananas

  - vars:
      topic: avocado toast
    assert:
      # For more information on assertions, see https://promptfoo.dev/docs/configuration/expected-outputs
      - type: icontains
        value: avocado
      - type: javascript
        value: 1 / (output.length + 1) # prefer shorter outputs
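      # Note: a javascript assertion evaluates its expression with `output`
      # bound to the model response; a numeric result is treated as the score,
      # so longer outputs score closer to 0 here.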

  - vars:
      topic: new york city
    assert:
      # For more information on model-graded evals, see https://promptfoo.dev/docs/configuration/expected-outputs/model-graded
      - type: llm-rubric
        value: ensure that the output is funny
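      # Note: llm-rubric is a model-graded assertion; a separate grader LLM
      # judges the output against the rubric text above.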
`;

export const DEFAULT_README = `To get started, set your OPENAI_API_KEY environment variable.
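
For example (POSIX shell; the key shown is a placeholder):
\`\`\`
export OPENAI_API_KEY=your-api-key
\`\`\`
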
Next, edit promptfooconfig.yaml.

Then run:
\`\`\`
promptfoo eval
\`\`\`

Afterwards, you can view the results by running \`promptfoo view\`
`;