kiln_ai.adapters.prompt_builders

  1import json
  2from abc import ABCMeta, abstractmethod
  3from typing import Dict
  4
  5from kiln_ai.datamodel import PromptGenerators, PromptId, Task, TaskRun
  6from kiln_ai.utils.exhaustive_error import raise_exhaustive_enum_error
  7
  8
  9class BasePromptBuilder(metaclass=ABCMeta):
 10    """Base class for building prompts from tasks.
 11
 12    Provides the core interface and basic functionality for prompt builders.
 13    """
 14
 15    def __init__(self, task: Task):
 16        """Initialize the prompt builder with a task.
 17
 18        Args:
 19            task (Task): The task containing instructions and requirements.
 20        """
 21        self.task = task
 22
 23    def prompt_id(self) -> str | None:
 24        """Returns the ID of the prompt, scoped to this builder.
 25
 26        Returns:
 27            str | None: The ID of the prompt, or None if not set.
 28        """
 29        return None
 30
 31    def build_prompt(self, include_json_instructions) -> str:
 32        """Build and return the complete prompt string.
 33
 34        Returns:
 35            str: The constructed prompt.
 36        """
 37        prompt = self.build_base_prompt()
 38
 39        if include_json_instructions and self.task.output_schema():
 40            prompt = (
 41                prompt
 42                + f"\n\n# Format Instructions\n\nReturn a JSON object conforming to the following schema:\n```\n{self.task.output_schema()}\n```"
 43            )
 44
 45        return prompt
 46
 47    @abstractmethod
 48    def build_base_prompt(self) -> str:
 49        """Build and return the complete prompt string.
 50
 51        Returns:
 52            str: The constructed prompt.
 53        """
 54        pass
 55
 56    def build_user_message(self, input: Dict | str) -> str:
 57        """Build a user message from the input.
 58
 59        Args:
 60            input (Union[Dict, str]): The input to format into a message.
 61
 62        Returns:
 63            str: The formatted user message.
 64        """
 65        if isinstance(input, Dict):
 66            return f"The input is:\n{json.dumps(input, indent=2, ensure_ascii=False)}"
 67
 68        return f"The input is:\n{input}"
 69
 70    def chain_of_thought_prompt(self) -> str | None:
 71        """Build and return the chain of thought prompt string.
 72
 73        Returns:
 74            str: The constructed chain of thought prompt.
 75        """
 76        return None
 77
 78    def build_prompt_for_ui(self) -> str:
 79        """Build a prompt for the UI. It includes additional instructions (like chain of thought), even if they are passed to the model in stages.
 80
 81        Designed for end-user consumption, not for model consumption.
 82
 83        Returns:
 84            str: The constructed prompt string.
 85        """
 86        base_prompt = self.build_prompt(include_json_instructions=False)
 87        cot_prompt = self.chain_of_thought_prompt()
 88        if cot_prompt:
 89            base_prompt += "\n# Thinking Instructions\n\n" + cot_prompt
 90        return base_prompt
 91
 92
 93class SimplePromptBuilder(BasePromptBuilder):
 94    """A basic prompt builder that combines task instruction with requirements."""
 95
 96    def build_base_prompt(self) -> str:
 97        """Build a simple prompt with instruction and requirements.
 98
 99        Returns:
100            str: The constructed prompt string.
101        """
102        base_prompt = self.task.instruction
103
104        # TODO: this is just a quick version. Formatting and best practices TBD
105        if len(self.task.requirements) > 0:
106            base_prompt += (
107                "\n\nYour response should respect the following requirements:\n"
108            )
109            # iterate requirements, formatting them in numbereed list like 1) task.instruction\n2)...
110            for i, requirement in enumerate(self.task.requirements):
111                base_prompt += f"{i + 1}) {requirement.instruction}\n"
112
113        return base_prompt
114
115
class MultiShotPromptBuilder(BasePromptBuilder):
    """Prompt builder that enriches the prompt with stored example runs."""

    @classmethod
    def example_count(cls) -> int:
        """Maximum number of examples to include in the prompt.

        Returns:
            int: The example cap (default 25).
        """
        return 25

    def build_base_prompt(self) -> str:
        """Assemble instruction, requirements, and example sections.

        Returns:
            str: The constructed prompt string with examples.
        """
        sections = [f"# Instruction\n\n{self.task.instruction}\n\n"]

        if self.task.requirements:
            sections.append(
                "# Requirements\n\nYour response should respect the following requirements:\n"
            )
            for num, requirement in enumerate(self.task.requirements, start=1):
                sections.append(f"{num}) {requirement.instruction}\n")
            sections.append("\n")

        examples = self.collect_examples()
        if examples:
            sections.append("# Example Outputs\n\n")
            for idx, example in enumerate(examples):
                sections.append(self.prompt_section_for_example(idx, example))

        return "".join(sections)

    def prompt_section_for_example(self, index: int, example: TaskRun) -> str:
        # A repaired output is the best ground truth when present; otherwise
        # fall back to the run's original output.
        chosen = example.repaired_output or example.output
        return f"## Example {index + 1}\n\nInput: {example.input}\nOutput: {chosen.output}\n\n"

    def collect_examples(self) -> list[TaskRun]:
        """Select up to `example_count()` runs to use as examples.

        Repaired runs are taken first (strongest signal), then unrepaired
        runs rated high quality, best rating first.
        """
        limit = self.__class__.example_count()
        runs = self.task.runs(readonly=True)
        selected: list[TaskRun] = []

        # Pass 1: runs with a human-repaired output are the best examples.
        for run in runs:
            if len(selected) >= limit:
                break
            if run.repaired_output is not None:
                selected.append(run)

        # Pass 2: high-quality rated runs (minimum "high_quality", i.e. 4 stars),
        # excluding repaired runs already consumed above, best-rated first.
        rated = [
            run
            for run in runs
            if run.output.rating is not None
            and run.output.rating.value is not None
            and run.output.rating.is_high_quality()
            and run.repaired_output is None
        ]
        rated.sort(
            key=lambda r: (r.output.rating and r.output.rating.value) or 0,
            reverse=True,
        )
        for run in rated:
            if len(selected) >= limit:
                break
            selected.append(run)
        return selected
188
189
class FewShotPromptBuilder(MultiShotPromptBuilder):
    """Multi-shot variant limited to a handful of examples."""

    @classmethod
    def example_count(cls) -> int:
        """Maximum number of examples to include in the prompt.

        Returns:
            int: The example cap (4).
        """
        return 4
201
202
class RepairsPromptBuilder(MultiShotPromptBuilder):
    """Multi-shot builder whose example sections also show the repair story:
    the insufficient initial output, the repair instructions, and the fix."""

    def prompt_section_for_example(self, index: int, example: TaskRun) -> str:
        repaired = example.repaired_output
        # Without a complete repair record, render the standard example section.
        if not (repaired and example.repair_instructions and repaired.output):
            return super().prompt_section_for_example(index, example)

        section = f"## Example {index + 1}\n\nInput: {example.input}\n\n"
        section += f"Initial Output Which Was Insufficient: {example.output.output}\n\n"
        section += f"Instructions On How to Improve the Initial Output: {example.repair_instructions}\n\n"
        section += f"Repaired Output Which is Sufficient: {repaired.output}\n\n"
        return section
223
224
def chain_of_thought_prompt(task: Task) -> str:
    """Standard chain-of-thought instructions for a task.

    Prefers the task's own thinking instruction; falls back to a generic
    step-by-step default when none is set.

    Returns:
        str: The constructed chain of thought prompt.
    """
    return task.thinking_instruction or "Think step by step, explaining your reasoning."
237
238
class SimpleChainOfThoughtPromptBuilder(SimplePromptBuilder):
    """Simple prompt augmented with chain-of-thought instructions."""

    def chain_of_thought_prompt(self) -> str | None:
        # Reuse the module-level standard chain-of-thought text.
        return chain_of_thought_prompt(self.task)
244
245
class FewShotChainOfThoughtPromptBuilder(FewShotPromptBuilder):
    """Few-shot prompt augmented with chain-of-thought instructions."""

    def chain_of_thought_prompt(self) -> str | None:
        # Reuse the module-level standard chain-of-thought text.
        return chain_of_thought_prompt(self.task)
251
252
class MultiShotChainOfThoughtPromptBuilder(MultiShotPromptBuilder):
    """Multi-shot prompt augmented with chain-of-thought instructions."""

    def chain_of_thought_prompt(self) -> str | None:
        # Reuse the module-level standard chain-of-thought text.
        return chain_of_thought_prompt(self.task)
258
259
class SavedPromptBuilder(BasePromptBuilder):
    """Serves a static prompt previously saved on the task."""

    def __init__(self, task: Task, prompt_id: str):
        super().__init__(task)
        # Scan the task's saved prompts for the requested ID.
        found = None
        for prompt in task.prompts(readonly=True):
            if prompt.id == prompt_id:
                found = prompt
                break
        if not found:
            raise ValueError(f"Prompt ID not found: {prompt_id}")
        self.prompt_model = found

    def prompt_id(self) -> str | None:
        return self.prompt_model.id

    def build_base_prompt(self) -> str:
        """Return the saved prompt text verbatim.

        Returns:
            str: The prompt string.
        """
        return self.prompt_model.prompt

    def chain_of_thought_prompt(self) -> str | None:
        # Saved prompts may carry their own chain-of-thought instructions.
        return self.prompt_model.chain_of_thought_instructions
290
291
class TaskRunConfigPromptBuilder(BasePromptBuilder):
    """Serves the frozen prompt stored on a task run config."""

    def __init__(self, task: Task, run_config_prompt_id: str):
        # ID format: task_run_config::[project_id]::[task_id]::[run_config_id]
        parts = run_config_prompt_id.split("::")
        if len(parts) != 4:
            raise ValueError(
                f"Invalid task run config prompt ID: {run_config_prompt_id}. Expected format: 'task_run_config::[project_id]::[task_id]::[run_config_id]'."
            )

        task_id = parts[2]
        if task_id != task.id:
            raise ValueError(
                f"Task run config prompt ID: {run_config_prompt_id}. Task ID mismatch. Expected: {task.id}, got: {task_id}."
            )

        run_config_id = parts[3]
        # Scan the task's run configs for the requested ID.
        found = None
        for candidate in task.run_configs(readonly=True):
            if candidate.id == run_config_id:
                found = candidate
                break
        if not found:
            raise ValueError(
                f"Task run config ID not found: {run_config_id} for prompt id {run_config_prompt_id}"
            )
        if found.prompt is None:
            raise ValueError(
                f"Task run config ID {run_config_id} does not have a stored prompt. Used as prompt id {run_config_prompt_id}"
            )

        # Cache the prompt contents; the run config object itself is not retained.
        self.prompt = found.prompt.prompt
        self.cot_prompt = found.prompt.chain_of_thought_instructions
        self.id = run_config_prompt_id

        super().__init__(task)

    def prompt_id(self) -> str | None:
        return self.id

    def build_base_prompt(self) -> str:
        return self.prompt

    def chain_of_thought_prompt(self) -> str | None:
        return self.cot_prompt
341
342
class FineTunePromptBuilder(BasePromptBuilder):
    """Serves the system message stored on a fine-tuned model."""

    def __init__(self, task: Task, nested_fine_tune_id: str):
        super().__init__(task)

        # IDs are in project_id::task_id::fine_tune_id format
        self.full_fine_tune_id = nested_fine_tune_id
        parts = nested_fine_tune_id.split("::")
        if len(parts) != 3:
            raise ValueError(
                f"Invalid fine-tune ID format. Expected 'project_id::task_id::fine_tune_id', got: {nested_fine_tune_id}"
            )
        fine_tune_id = parts[2]

        # Scan the task's fine-tunes for the requested ID.
        found = None
        for fine_tune in task.finetunes(readonly=True):
            if fine_tune.id == fine_tune_id:
                found = fine_tune
                break
        if not found:
            raise ValueError(f"Fine-tune ID not found: {fine_tune_id}")
        self.fine_tune_model = found

    def prompt_id(self) -> str | None:
        return self.full_fine_tune_id

    def build_base_prompt(self) -> str:
        # The fine-tune's stored system message is the entire base prompt.
        return self.fine_tune_model.system_message

    def chain_of_thought_prompt(self) -> str | None:
        return self.fine_tune_model.thinking_instructions
378
379
380# Our UI has some names that are not the same as the class names, which also hint parameters.
381def prompt_builder_from_id(prompt_id: PromptId, task: Task) -> BasePromptBuilder:
382    """Convert a name used in the UI to the corresponding prompt builder class.
383
384    Args:
385        prompt_id (PromptId): The prompt ID.
386
387    Returns:
388        type[BasePromptBuilder]: The corresponding prompt builder class.
389
390    Raises:
391        ValueError: If the UI name is not recognized.
392    """
393
394    # Saved prompts are prefixed with "id::"
395    if prompt_id.startswith("id::"):
396        prompt_id = prompt_id[4:]
397        return SavedPromptBuilder(task, prompt_id)
398
399    # Task run config prompts are prefixed with "task_run_config::"
400    # task_run_config::[project_id]::[task_id]::[run_config_id]
401    if prompt_id.startswith("task_run_config::"):
402        return TaskRunConfigPromptBuilder(task, prompt_id)
403
404    # Fine-tune prompts are prefixed with "fine_tune_prompt::"
405    if prompt_id.startswith("fine_tune_prompt::"):
406        prompt_id = prompt_id[18:]
407        return FineTunePromptBuilder(task, prompt_id)
408
409    # Check if the prompt_id matches any enum value
410    if prompt_id not in [member.value for member in PromptGenerators]:
411        raise ValueError(f"Unknown prompt generator: {prompt_id}")
412    typed_prompt_generator = PromptGenerators(prompt_id)
413
414    match typed_prompt_generator:
415        case PromptGenerators.SIMPLE:
416            return SimplePromptBuilder(task)
417        case PromptGenerators.FEW_SHOT:
418            return FewShotPromptBuilder(task)
419        case PromptGenerators.MULTI_SHOT:
420            return MultiShotPromptBuilder(task)
421        case PromptGenerators.REPAIRS:
422            return RepairsPromptBuilder(task)
423        case PromptGenerators.SIMPLE_CHAIN_OF_THOUGHT:
424            return SimpleChainOfThoughtPromptBuilder(task)
425        case PromptGenerators.FEW_SHOT_CHAIN_OF_THOUGHT:
426            return FewShotChainOfThoughtPromptBuilder(task)
427        case PromptGenerators.MULTI_SHOT_CHAIN_OF_THOUGHT:
428            return MultiShotChainOfThoughtPromptBuilder(task)
429        case _:
430            # Type checking will find missing cases
431            raise_exhaustive_enum_error(typed_prompt_generator)
class BasePromptBuilder:
10class BasePromptBuilder(metaclass=ABCMeta):
11    """Base class for building prompts from tasks.
12
13    Provides the core interface and basic functionality for prompt builders.
14    """
15
16    def __init__(self, task: Task):
17        """Initialize the prompt builder with a task.
18
19        Args:
20            task (Task): The task containing instructions and requirements.
21        """
22        self.task = task
23
24    def prompt_id(self) -> str | None:
25        """Returns the ID of the prompt, scoped to this builder.
26
27        Returns:
28            str | None: The ID of the prompt, or None if not set.
29        """
30        return None
31
32    def build_prompt(self, include_json_instructions) -> str:
33        """Build and return the complete prompt string.
34
35        Returns:
36            str: The constructed prompt.
37        """
38        prompt = self.build_base_prompt()
39
40        if include_json_instructions and self.task.output_schema():
41            prompt = (
42                prompt
43                + f"\n\n# Format Instructions\n\nReturn a JSON object conforming to the following schema:\n```\n{self.task.output_schema()}\n```"
44            )
45
46        return prompt
47
48    @abstractmethod
49    def build_base_prompt(self) -> str:
50        """Build and return the complete prompt string.
51
52        Returns:
53            str: The constructed prompt.
54        """
55        pass
56
57    def build_user_message(self, input: Dict | str) -> str:
58        """Build a user message from the input.
59
60        Args:
61            input (Union[Dict, str]): The input to format into a message.
62
63        Returns:
64            str: The formatted user message.
65        """
66        if isinstance(input, Dict):
67            return f"The input is:\n{json.dumps(input, indent=2, ensure_ascii=False)}"
68
69        return f"The input is:\n{input}"
70
71    def chain_of_thought_prompt(self) -> str | None:
72        """Build and return the chain of thought prompt string.
73
74        Returns:
75            str: The constructed chain of thought prompt.
76        """
77        return None
78
79    def build_prompt_for_ui(self) -> str:
80        """Build a prompt for the UI. It includes additional instructions (like chain of thought), even if they are passed to the model in stages.
81
82        Designed for end-user consumption, not for model consumption.
83
84        Returns:
85            str: The constructed prompt string.
86        """
87        base_prompt = self.build_prompt(include_json_instructions=False)
88        cot_prompt = self.chain_of_thought_prompt()
89        if cot_prompt:
90            base_prompt += "\n# Thinking Instructions\n\n" + cot_prompt
91        return base_prompt

Base class for building prompts from tasks.

Provides the core interface and basic functionality for prompt builders.

BasePromptBuilder(task: kiln_ai.datamodel.Task)
16    def __init__(self, task: Task):
17        """Initialize the prompt builder with a task.
18
19        Args:
20            task (Task): The task containing instructions and requirements.
21        """
22        self.task = task

Initialize the prompt builder with a task.

Args: task (Task): The task containing instructions and requirements.

task
def prompt_id(self) -> str | None:
24    def prompt_id(self) -> str | None:
25        """Returns the ID of the prompt, scoped to this builder.
26
27        Returns:
28            str | None: The ID of the prompt, or None if not set.
29        """
30        return None

Returns the ID of the prompt, scoped to this builder.

Returns: str | None: The ID of the prompt, or None if not set.

def build_prompt(self, include_json_instructions) -> str:
32    def build_prompt(self, include_json_instructions) -> str:
33        """Build and return the complete prompt string.
34
35        Returns:
36            str: The constructed prompt.
37        """
38        prompt = self.build_base_prompt()
39
40        if include_json_instructions and self.task.output_schema():
41            prompt = (
42                prompt
43                + f"\n\n# Format Instructions\n\nReturn a JSON object conforming to the following schema:\n```\n{self.task.output_schema()}\n```"
44            )
45
46        return prompt

Build and return the complete prompt string.

Returns: str: The constructed prompt.

@abstractmethod
def build_base_prompt(self) -> str:
48    @abstractmethod
49    def build_base_prompt(self) -> str:
50        """Build and return the complete prompt string.
51
52        Returns:
53            str: The constructed prompt.
54        """
55        pass

Build and return the complete prompt string.

Returns: str: The constructed prompt.

def build_user_message(self, input: Union[Dict, str]) -> str:
57    def build_user_message(self, input: Dict | str) -> str:
58        """Build a user message from the input.
59
60        Args:
61            input (Union[Dict, str]): The input to format into a message.
62
63        Returns:
64            str: The formatted user message.
65        """
66        if isinstance(input, Dict):
67            return f"The input is:\n{json.dumps(input, indent=2, ensure_ascii=False)}"
68
69        return f"The input is:\n{input}"

Build a user message from the input.

Args: input (Union[Dict, str]): The input to format into a message.

Returns: str: The formatted user message.

def chain_of_thought_prompt(self) -> str | None:
71    def chain_of_thought_prompt(self) -> str | None:
72        """Build and return the chain of thought prompt string.
73
74        Returns:
75            str: The constructed chain of thought prompt.
76        """
77        return None

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

def build_prompt_for_ui(self) -> str:
79    def build_prompt_for_ui(self) -> str:
80        """Build a prompt for the UI. It includes additional instructions (like chain of thought), even if they are passed to the model in stages.
81
82        Designed for end-user consumption, not for model consumption.
83
84        Returns:
85            str: The constructed prompt string.
86        """
87        base_prompt = self.build_prompt(include_json_instructions=False)
88        cot_prompt = self.chain_of_thought_prompt()
89        if cot_prompt:
90            base_prompt += "\n# Thinking Instructions\n\n" + cot_prompt
91        return base_prompt

Build a prompt for the UI. It includes additional instructions (like chain of thought), even if they are passed to the model in stages.

Designed for end-user consumption, not for model consumption.

Returns: str: The constructed prompt string.

class SimplePromptBuilder(BasePromptBuilder):
 94class SimplePromptBuilder(BasePromptBuilder):
 95    """A basic prompt builder that combines task instruction with requirements."""
 96
 97    def build_base_prompt(self) -> str:
 98        """Build a simple prompt with instruction and requirements.
 99
100        Returns:
101            str: The constructed prompt string.
102        """
103        base_prompt = self.task.instruction
104
105        # TODO: this is just a quick version. Formatting and best practices TBD
106        if len(self.task.requirements) > 0:
107            base_prompt += (
108                "\n\nYour response should respect the following requirements:\n"
109            )
110            # iterate requirements, formatting them in numbered list like 1) task.instruction\n2)...
111            for i, requirement in enumerate(self.task.requirements):
112                base_prompt += f"{i + 1}) {requirement.instruction}\n"
113
114        return base_prompt

A basic prompt builder that combines task instruction with requirements.

def build_base_prompt(self) -> str:
 97    def build_base_prompt(self) -> str:
 98        """Build a simple prompt with instruction and requirements.
 99
100        Returns:
101            str: The constructed prompt string.
102        """
103        base_prompt = self.task.instruction
104
105        # TODO: this is just a quick version. Formatting and best practices TBD
106        if len(self.task.requirements) > 0:
107            base_prompt += (
108                "\n\nYour response should respect the following requirements:\n"
109            )
110            # iterate requirements, formatting them in numbered list like 1) task.instruction\n2)...
111            for i, requirement in enumerate(self.task.requirements):
112                base_prompt += f"{i + 1}) {requirement.instruction}\n"
113
114        return base_prompt

Build a simple prompt with instruction and requirements.

Returns: str: The constructed prompt string.

class MultiShotPromptBuilder(BasePromptBuilder):
117class MultiShotPromptBuilder(BasePromptBuilder):
118    """A prompt builder that includes multiple examples in the prompt."""
119
120    @classmethod
121    def example_count(cls) -> int:
122        """Get the maximum number of examples to include in the prompt.
123
124        Returns:
125            int: The maximum number of examples (default 25).
126        """
127        return 25
128
129    def build_base_prompt(self) -> str:
130        """Build a prompt with instruction, requirements, and multiple examples.
131
132        Returns:
133            str: The constructed prompt string with examples.
134        """
135        base_prompt = f"# Instruction\n\n{self.task.instruction}\n\n"
136
137        if len(self.task.requirements) > 0:
138            base_prompt += "# Requirements\n\nYour response should respect the following requirements:\n"
139            for i, requirement in enumerate(self.task.requirements):
140                base_prompt += f"{i + 1}) {requirement.instruction}\n"
141            base_prompt += "\n"
142
143        valid_examples = self.collect_examples()
144
145        if len(valid_examples) == 0:
146            return base_prompt
147
148        base_prompt += "# Example Outputs\n\n"
149        for i, example in enumerate(valid_examples):
150            base_prompt += self.prompt_section_for_example(i, example)
151
152        return base_prompt
153
154    def prompt_section_for_example(self, index: int, example: TaskRun) -> str:
155        # Prefer repaired output if it exists, otherwise use the regular output
156        output = example.repaired_output or example.output
157        return f"## Example {index + 1}\n\nInput: {example.input}\nOutput: {output.output}\n\n"
158
159    def collect_examples(self) -> list[TaskRun]:
160        valid_examples: list[TaskRun] = []
161        runs = self.task.runs(readonly=True)
162
163        # first pass, we look for repaired outputs. These are the best examples.
164        for run in runs:
165            if len(valid_examples) >= self.__class__.example_count():
166                break
167            if run.repaired_output is not None:
168                valid_examples.append(run)
169
170        # second pass, we look for high quality outputs (rating based)
171        # Minimum is "high_quality" (4 star in star rating scale), then sort by rating
172        # exclude repaired outputs as they were used above
173        runs_with_rating = [
174            run
175            for run in runs
176            if run.output.rating is not None
177            and run.output.rating.value is not None
178            and run.output.rating.is_high_quality()
179            and run.repaired_output is None
180        ]
181        runs_with_rating.sort(
182            key=lambda x: (x.output.rating and x.output.rating.value) or 0, reverse=True
183        )
184        for run in runs_with_rating:
185            if len(valid_examples) >= self.__class__.example_count():
186                break
187            valid_examples.append(run)
188        return valid_examples

A prompt builder that includes multiple examples in the prompt.

@classmethod
def example_count(cls) -> int:
120    @classmethod
121    def example_count(cls) -> int:
122        """Get the maximum number of examples to include in the prompt.
123
124        Returns:
125            int: The maximum number of examples (default 25).
126        """
127        return 25

Get the maximum number of examples to include in the prompt.

Returns: int: The maximum number of examples (default 25).

def build_base_prompt(self) -> str:
129    def build_base_prompt(self) -> str:
130        """Build a prompt with instruction, requirements, and multiple examples.
131
132        Returns:
133            str: The constructed prompt string with examples.
134        """
135        base_prompt = f"# Instruction\n\n{self.task.instruction}\n\n"
136
137        if len(self.task.requirements) > 0:
138            base_prompt += "# Requirements\n\nYour response should respect the following requirements:\n"
139            for i, requirement in enumerate(self.task.requirements):
140                base_prompt += f"{i + 1}) {requirement.instruction}\n"
141            base_prompt += "\n"
142
143        valid_examples = self.collect_examples()
144
145        if len(valid_examples) == 0:
146            return base_prompt
147
148        base_prompt += "# Example Outputs\n\n"
149        for i, example in enumerate(valid_examples):
150            base_prompt += self.prompt_section_for_example(i, example)
151
152        return base_prompt

Build a prompt with instruction, requirements, and multiple examples.

Returns: str: The constructed prompt string with examples.

def prompt_section_for_example(self, index: int, example: kiln_ai.datamodel.TaskRun) -> str:
154    def prompt_section_for_example(self, index: int, example: TaskRun) -> str:
155        # Prefer repaired output if it exists, otherwise use the regular output
156        output = example.repaired_output or example.output
157        return f"## Example {index + 1}\n\nInput: {example.input}\nOutput: {output.output}\n\n"
def collect_examples(self) -> list[kiln_ai.datamodel.TaskRun]:
159    def collect_examples(self) -> list[TaskRun]:
160        valid_examples: list[TaskRun] = []
161        runs = self.task.runs(readonly=True)
162
163        # first pass, we look for repaired outputs. These are the best examples.
164        for run in runs:
165            if len(valid_examples) >= self.__class__.example_count():
166                break
167            if run.repaired_output is not None:
168                valid_examples.append(run)
169
170        # second pass, we look for high quality outputs (rating based)
171        # Minimum is "high_quality" (4 star in star rating scale), then sort by rating
172        # exclude repaired outputs as they were used above
173        runs_with_rating = [
174            run
175            for run in runs
176            if run.output.rating is not None
177            and run.output.rating.value is not None
178            and run.output.rating.is_high_quality()
179            and run.repaired_output is None
180        ]
181        runs_with_rating.sort(
182            key=lambda x: (x.output.rating and x.output.rating.value) or 0, reverse=True
183        )
184        for run in runs_with_rating:
185            if len(valid_examples) >= self.__class__.example_count():
186                break
187            valid_examples.append(run)
188        return valid_examples
class FewShotPromptBuilder(MultiShotPromptBuilder):
    """A prompt builder that embeds a small number of examples (4)."""

    @classmethod
    def example_count(cls) -> int:
        """Maximum number of examples to include in the prompt.

        Returns:
            int: The maximum number of examples (4).
        """
        return 4

A prompt builder that includes a small number of examples in the prompt.

@classmethod
def example_count(cls) -> int:
    """Return the example cap used by few-shot prompts.

    Returns:
        int: The maximum number of examples (4).
    """
    return 4

Get the maximum number of examples to include in the prompt.

Returns: int: The maximum number of examples (4).

class RepairsPromptBuilder(MultiShotPromptBuilder):
    """A multi-shot builder whose examples also show the repair process.

    For runs that were repaired, the example section includes the
    insufficient initial output, the human repair instructions, and the
    repaired output.
    """

    def prompt_section_for_example(self, index: int, example: TaskRun) -> str:
        # Fall back to the plain example format when repair data is missing.
        has_repair_data = (
            example.repaired_output
            and example.repair_instructions
            and example.repaired_output.output
        )
        if not has_repair_data:
            return super().prompt_section_for_example(index, example)

        section = f"## Example {index + 1}\n\nInput: {example.input}\n\n"
        section += (
            f"Initial Output Which Was Insufficient: {example.output.output}\n\n"
        )
        section += f"Instructions On How to Improve the Initial Output: {example.repair_instructions}\n\n"
        section += (
            f"Repaired Output Which is Sufficient: {example.repaired_output.output}\n\n"
        )
        return section

A prompt builder that includes multiple examples in the prompt, including repaired instructions describing what was wrong, and how it was fixed.

def prompt_section_for_example(self, index: int, example: TaskRun) -> str:
    """Render one example, including repair details when available."""
    if (
        not example.repaired_output
        or not example.repair_instructions
        or not example.repaired_output.output
    ):
        # No usable repair data: use the standard example format.
        return super().prompt_section_for_example(index, example)

    fragments = [
        f"## Example {index + 1}\n\nInput: {example.input}\n\n",
        f"Initial Output Which Was Insufficient: {example.output.output}\n\n",
        f"Instructions On How to Improve the Initial Output: {example.repair_instructions}\n\n",
        f"Repaired Output Which is Sufficient: {example.repaired_output.output}\n\n",
    ]
    return "".join(fragments)
def chain_of_thought_prompt(task: Task) -> str:
    """Build the chain-of-thought instruction for a task.

    Returns:
        str: The task's own thinking instruction, or a generic
        step-by-step instruction when none is set.
    """
    return (
        task.thinking_instruction
        or "Think step by step, explaining your reasoning."
    )

Standard implementation to build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

class SimpleChainOfThoughtPromptBuilder(SimplePromptBuilder):
    """Simple prompt plus a chain-of-thought instruction."""

    def chain_of_thought_prompt(self) -> str | None:
        # Delegate to the module-level helper for the standard COT text.
        return chain_of_thought_prompt(self.task)

A prompt builder that includes a chain of thought prompt on top of the simple prompt.

def chain_of_thought_prompt(self) -> str | None:
    """Return the standard chain-of-thought instruction for this task."""
    return chain_of_thought_prompt(self.task)

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

class FewShotChainOfThoughtPromptBuilder(FewShotPromptBuilder):
    """Few-shot prompt plus a chain-of-thought instruction."""

    def chain_of_thought_prompt(self) -> str | None:
        # Delegate to the module-level helper for the standard COT text.
        return chain_of_thought_prompt(self.task)

A prompt builder that includes a chain of thought prompt on top of the few shot prompt.

def chain_of_thought_prompt(self) -> str | None:
    """Return the chain-of-thought instruction via the shared helper."""
    return chain_of_thought_prompt(self.task)

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

class MultiShotChainOfThoughtPromptBuilder(MultiShotPromptBuilder):
    """Multi-shot prompt plus a chain-of-thought instruction."""

    def chain_of_thought_prompt(self) -> str | None:
        # Delegate to the module-level helper for the standard COT text.
        return chain_of_thought_prompt(self.task)

A prompt builder that includes a chain of thought prompt on top of the multi shot prompt.

def chain_of_thought_prompt(self) -> str | None:
    """Return the task's chain-of-thought instruction (or the default)."""
    return chain_of_thought_prompt(self.task)

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

class SavedPromptBuilder(BasePromptBuilder):
    """A prompt builder that serves a static prompt saved on the task."""

    def __init__(self, task: Task, prompt_id: str):
        """Look up the saved prompt by ID.

        Args:
            task (Task): The task the prompt belongs to.
            prompt_id (str): The ID of the saved prompt.

        Raises:
            ValueError: If no prompt with this ID exists on the task.
        """
        super().__init__(task)
        found = next(
            (p for p in task.prompts(readonly=True) if p.id == prompt_id),
            None,
        )
        if not found:
            raise ValueError(f"Prompt ID not found: {prompt_id}")
        self.prompt_model = found

    def prompt_id(self) -> str | None:
        """Return the saved prompt's ID."""
        return self.prompt_model.id

    def build_base_prompt(self) -> str:
        """Return the saved prompt text verbatim.

        Returns:
            str: The prompt string.
        """
        return self.prompt_model.prompt

    def chain_of_thought_prompt(self) -> str | None:
        """Return the saved chain-of-thought instructions, if any."""
        return self.prompt_model.chain_of_thought_instructions

A prompt builder that looks up a static prompt.

def __init__(self, task: Task, prompt_id: str):
    """Initialize from a task and the ID of a prompt saved on it.

    Args:
        task (Task): The task the prompt belongs to.
        prompt_id (str): ID of the saved prompt to load.

    Raises:
        ValueError: If the prompt ID is not found on the task.
    """
    super().__init__(task)
    found = None
    for candidate in task.prompts(readonly=True):
        if candidate.id == prompt_id:
            found = candidate
            break
    if not found:
        raise ValueError(f"Prompt ID not found: {prompt_id}")
    self.prompt_model = found

Initialize the builder from a task and the ID of a prompt saved on that task.

Args: task (Task): The task the saved prompt belongs to. prompt_id (str): The ID of the saved prompt to load; a ValueError is raised if it is not found.

prompt_model
def prompt_id(self) -> str | None:
    """Return the ID of the saved prompt this builder wraps."""
    return self.prompt_model.id

Returns the ID of the prompt, scoped to this builder.

Returns: str | None: The ID of the prompt, or None if not set.

def build_base_prompt(self) -> str:
    """Return the saved prompt text.

    Returns:
        str: The prompt string, exactly as stored.
    """
    return self.prompt_model.prompt

Returns a saved prompt.

Returns: str: The prompt string.

def chain_of_thought_prompt(self) -> str | None:
    """Return the chain-of-thought instructions stored with the prompt."""
    return self.prompt_model.chain_of_thought_instructions

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

class TaskRunConfigPromptBuilder(BasePromptBuilder):
    """A prompt builder that looks up a static prompt in a task run config."""

    def __init__(self, task: Task, run_config_prompt_id: str):
        """Load the frozen prompt stored on a task run config.

        Args:
            task (Task): The task the run config belongs to.
            run_config_prompt_id (str): ID in the format
                'task_run_config::[project_id]::[task_id]::[run_config_id]'.

        Raises:
            ValueError: If the ID is malformed, targets a different task,
                names a missing run config, or the run config has no
                stored prompt.
        """
        parts = run_config_prompt_id.split("::")
        # Also validate the literal prefix: a 4-part ID of another kind
        # should not silently resolve to a run config prompt.
        if len(parts) != 4 or parts[0] != "task_run_config":
            raise ValueError(
                f"Invalid task run config prompt ID: {run_config_prompt_id}. Expected format: 'task_run_config::[project_id]::[task_id]::[run_config_id]'."
            )

        task_id = parts[2]
        if task_id != task.id:
            raise ValueError(
                f"Task run config prompt ID: {run_config_prompt_id}. Task ID mismatch. Expected: {task.id}, got: {task_id}."
            )

        run_config_id = parts[3]
        run_config = next(
            (
                run_config
                for run_config in task.run_configs(readonly=True)
                if run_config.id == run_config_id
            ),
            None,
        )
        if not run_config:
            raise ValueError(
                f"Task run config ID not found: {run_config_id} for prompt id {run_config_prompt_id}"
            )
        if run_config.prompt is None:
            raise ValueError(
                f"Task run config ID {run_config_id} does not have a stored prompt. Used as prompt id {run_config_prompt_id}"
            )

        # Cache the frozen prompt so later calls don't re-read the config.
        self.prompt = run_config.prompt.prompt
        self.cot_prompt = run_config.prompt.chain_of_thought_instructions
        self.id = run_config_prompt_id

        super().__init__(task)

    def prompt_id(self) -> str | None:
        """Return the full task-run-config prompt ID."""
        return self.id

    def build_base_prompt(self) -> str:
        """Return the frozen prompt from the run config."""
        return self.prompt

    def chain_of_thought_prompt(self) -> str | None:
        """Return the frozen chain-of-thought instructions, if any."""
        return self.cot_prompt

A prompt builder that looks up a static prompt in a task run config.

def __init__(self, task: Task, run_config_prompt_id: str):
    """Load the frozen prompt stored on a task run config.

    Args:
        task (Task): The task the run config belongs to.
        run_config_prompt_id (str): ID in the format
            'task_run_config::[project_id]::[task_id]::[run_config_id]'.

    Raises:
        ValueError: If the ID is malformed, targets a different task,
            names a missing run config, or the run config has no
            stored prompt.
    """
    parts = run_config_prompt_id.split("::")
    # Also validate the literal prefix: a 4-part ID of another kind
    # should not silently resolve to a run config prompt.
    if len(parts) != 4 or parts[0] != "task_run_config":
        raise ValueError(
            f"Invalid task run config prompt ID: {run_config_prompt_id}. Expected format: 'task_run_config::[project_id]::[task_id]::[run_config_id]'."
        )

    task_id = parts[2]
    if task_id != task.id:
        raise ValueError(
            f"Task run config prompt ID: {run_config_prompt_id}. Task ID mismatch. Expected: {task.id}, got: {task_id}."
        )

    run_config_id = parts[3]
    run_config = next(
        (
            run_config
            for run_config in task.run_configs(readonly=True)
            if run_config.id == run_config_id
        ),
        None,
    )
    if not run_config:
        raise ValueError(
            f"Task run config ID not found: {run_config_id} for prompt id {run_config_prompt_id}"
        )
    if run_config.prompt is None:
        raise ValueError(
            f"Task run config ID {run_config_id} does not have a stored prompt. Used as prompt id {run_config_prompt_id}"
        )

    # Cache the frozen prompt so later calls don't re-read the config.
    self.prompt = run_config.prompt.prompt
    self.cot_prompt = run_config.prompt.chain_of_thought_instructions
    self.id = run_config_prompt_id

    super().__init__(task)

Initialize the builder from a task and a task-run-config prompt ID.

Args: task (Task): The task the run config belongs to. run_config_prompt_id (str): ID in the format 'task_run_config::[project_id]::[task_id]::[run_config_id]'; a ValueError is raised if it is malformed or unresolvable.

prompt
cot_prompt
id
def prompt_id(self) -> str | None:
    """Return the full run-config prompt ID this builder was built from."""
    return self.id

Returns the ID of the prompt, scoped to this builder.

Returns: str | None: The ID of the prompt, or None if not set.

def build_base_prompt(self) -> str:
    """Return the prompt text captured from the run config."""
    return self.prompt

Build and return the complete prompt string.

Returns: str: The constructed prompt.

def chain_of_thought_prompt(self) -> str | None:
    """Return the chain-of-thought text captured from the run config."""
    return self.cot_prompt

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

class FineTunePromptBuilder(BasePromptBuilder):
    """A prompt builder that reuses the prompt of a fine-tune."""

    def __init__(self, task: Task, nested_fine_tune_id: str):
        """Look up the fine-tune by its nested ID.

        Args:
            task (Task): The task the fine-tune belongs to.
            nested_fine_tune_id (str): ID in the format
                'project_id::task_id::fine_tune_id'.

        Raises:
            ValueError: If the ID is malformed or the fine-tune is not found.
        """
        super().__init__(task)

        # Keep the full nested ID so prompt_id() can report it verbatim.
        self.full_fine_tune_id = nested_fine_tune_id
        segments = nested_fine_tune_id.split("::")
        if len(segments) != 3:
            raise ValueError(
                f"Invalid fine-tune ID format. Expected 'project_id::task_id::fine_tune_id', got: {nested_fine_tune_id}"
            )
        fine_tune_id = segments[2]

        found = None
        for fine_tune in task.finetunes(readonly=True):
            if fine_tune.id == fine_tune_id:
                found = fine_tune
                break
        if not found:
            raise ValueError(f"Fine-tune ID not found: {fine_tune_id}")
        self.fine_tune_model = found

    def prompt_id(self) -> str | None:
        """Return the full nested fine-tune ID."""
        return self.full_fine_tune_id

    def build_base_prompt(self) -> str:
        """Return the system message the fine-tune was trained with."""
        return self.fine_tune_model.system_message

    def chain_of_thought_prompt(self) -> str | None:
        """Return the thinking instructions the fine-tune was trained with."""
        return self.fine_tune_model.thinking_instructions

A prompt builder that looks up a fine-tune prompt.

def __init__(self, task: Task, nested_fine_tune_id: str):
    """Initialize from a task and a nested fine-tune ID.

    Args:
        task (Task): The task the fine-tune belongs to.
        nested_fine_tune_id (str): ID in 'project_id::task_id::fine_tune_id'
            format.

    Raises:
        ValueError: If the ID is malformed or the fine-tune is not found.
    """
    super().__init__(task)

    # Keep the full nested ID so prompt_id() can report it verbatim.
    self.full_fine_tune_id = nested_fine_tune_id
    segments = nested_fine_tune_id.split("::")
    if len(segments) != 3:
        raise ValueError(
            f"Invalid fine-tune ID format. Expected 'project_id::task_id::fine_tune_id', got: {nested_fine_tune_id}"
        )
    fine_tune_id = segments[2]

    found = next(
        (ft for ft in task.finetunes(readonly=True) if ft.id == fine_tune_id),
        None,
    )
    if not found:
        raise ValueError(f"Fine-tune ID not found: {fine_tune_id}")
    self.fine_tune_model = found

Initialize the builder from a task and a nested fine-tune ID.

Args: task (Task): The task the fine-tune belongs to. nested_fine_tune_id (str): ID in 'project_id::task_id::fine_tune_id' format; a ValueError is raised if it is malformed or the fine-tune is not found.

full_fine_tune_id
fine_tune_model
def prompt_id(self) -> str | None:
    """Return the full 'project::task::fine_tune' ID."""
    return self.full_fine_tune_id

Returns the ID of the prompt, scoped to this builder.

Returns: str | None: The ID of the prompt, or None if not set.

def build_base_prompt(self) -> str:
    """Return the fine-tune's system message as the base prompt."""
    return self.fine_tune_model.system_message

Build and return the complete prompt string.

Returns: str: The constructed prompt.

def chain_of_thought_prompt(self) -> str | None:
    """Return the fine-tune's thinking instructions, if any."""
    return self.fine_tune_model.thinking_instructions

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

def prompt_builder_from_id( prompt_id: Annotated[str, AfterValidator(func=<function <lambda>>)], task: kiln_ai.datamodel.Task) -> BasePromptBuilder:
382def prompt_builder_from_id(prompt_id: PromptId, task: Task) -> BasePromptBuilder:
383    """Convert a name used in the UI to the corresponding prompt builder class.
384
385    Args:
386        prompt_id (PromptId): The prompt ID.
387
388    Returns:
389        type[BasePromptBuilder]: The corresponding prompt builder class.
390
391    Raises:
392        ValueError: If the UI name is not recognized.
393    """
394
395    # Saved prompts are prefixed with "id::"
396    if prompt_id.startswith("id::"):
397        prompt_id = prompt_id[4:]
398        return SavedPromptBuilder(task, prompt_id)
399
400    # Task run config prompts are prefixed with "task_run_config::"
401    # task_run_config::[project_id]::[task_id]::[run_config_id]
402    if prompt_id.startswith("task_run_config::"):
403        return TaskRunConfigPromptBuilder(task, prompt_id)
404
405    # Fine-tune prompts are prefixed with "fine_tune_prompt::"
406    if prompt_id.startswith("fine_tune_prompt::"):
407        prompt_id = prompt_id[18:]
408        return FineTunePromptBuilder(task, prompt_id)
409
410    # Check if the prompt_id matches any enum value
411    if prompt_id not in [member.value for member in PromptGenerators]:
412        raise ValueError(f"Unknown prompt generator: {prompt_id}")
413    typed_prompt_generator = PromptGenerators(prompt_id)
414
415    match typed_prompt_generator:
416        case PromptGenerators.SIMPLE:
417            return SimplePromptBuilder(task)
418        case PromptGenerators.FEW_SHOT:
419            return FewShotPromptBuilder(task)
420        case PromptGenerators.MULTI_SHOT:
421            return MultiShotPromptBuilder(task)
422        case PromptGenerators.REPAIRS:
423            return RepairsPromptBuilder(task)
424        case PromptGenerators.SIMPLE_CHAIN_OF_THOUGHT:
425            return SimpleChainOfThoughtPromptBuilder(task)
426        case PromptGenerators.FEW_SHOT_CHAIN_OF_THOUGHT:
427            return FewShotChainOfThoughtPromptBuilder(task)
428        case PromptGenerators.MULTI_SHOT_CHAIN_OF_THOUGHT:
429            return MultiShotChainOfThoughtPromptBuilder(task)
430        case _:
431            # Type checking will find missing cases
432            raise_exhaustive_enum_error(typed_prompt_generator)

Convert a name used in the UI to the corresponding prompt builder class.

Args: prompt_id (PromptId): The prompt ID.

Returns: BasePromptBuilder: A prompt builder instance for the given ID.

Raises: ValueError: If the UI name is not recognized.