kiln_ai.adapters.prompt_builders

  1from __future__ import annotations
  2
  3from abc import ABCMeta, abstractmethod
  4from dataclasses import dataclass
  5from typing import TYPE_CHECKING
  6
  7from kiln_ai.datamodel import PromptGenerators, PromptId, Task, TaskRun
  8from kiln_ai.utils.exhaustive_error import raise_exhaustive_enum_error
  9
 10if TYPE_CHECKING:
 11    from kiln_ai.datamodel.skill import Skill
 12
 13
 14@dataclass
 15class PromptExample:
 16    """A simple input/output example for use in prompt building."""
 17
 18    input: str
 19    output: str
 20
 21
 22def build_skills_prompt_section(skills: list[Skill] | None) -> str | None:
 23    """Build a system prompt section listing available skills.
 24
 25    This is a standalone function so both the inference adapter and the
 26    fine-tune pipeline can include the same skills instructions in the
 27    system prompt.
 28    """
 29    if not skills:
 30        return None
 31
 32    skill_lines = "\n".join(f"- `{s.name}`\n  {s.description}" for s in skills)
 33
 34    return (
 35        "# Skills\n\n"
 36        "When a Skill is relevant, load it with `skill(name)` and follow its instructions.\n"
 37        "Load additional Skill resources only if needed with `skill(name, resource)`.\n"
 38        "## Available Skills\n\n"
 39        f"{skill_lines}"
 40    )
 41
 42
 43class BasePromptBuilder(metaclass=ABCMeta):
 44    """Base class for building prompts from tasks.
 45
 46    Provides the core interface and basic functionality for prompt builders.
 47    """
 48
 49    def __init__(self, task: Task):
 50        """Initialize the prompt builder with a task.
 51
 52        Args:
 53            task (Task): The task containing instructions and requirements.
 54        """
 55        self.task = task
 56
 57    def prompt_id(self) -> str | None:
 58        """Returns the ID of the prompt, scoped to this builder.
 59
 60        Returns:
 61            str | None: The ID of the prompt, or None if not set.
 62        """
 63        return None
 64
 65    def build_prompt(
 66        self,
 67        include_json_instructions: bool,
 68        skills: list[Skill] | None = None,
 69    ) -> str:
 70        """Build and return the complete prompt string.
 71
 72        Args:
 73            include_json_instructions: Whether to include JSON schema formatting instructions.
 74            skills: Optional list of skills to include in the prompt. When provided,
 75                a skills instruction section is appended so the model knows which
 76                skills are available.
 77
 78        Returns:
 79            str: The constructed prompt.
 80        """
 81        prompt = self.build_base_prompt()
 82
 83        if include_json_instructions and self.task.output_schema():
 84            prompt = (
 85                prompt
 86                + f"\n\n# Format Instructions\n\nReturn a JSON object conforming to the following schema:\n```\n{self.task.output_schema()}\n```"
 87            )
 88
 89        skills_section = build_skills_prompt_section(skills)
 90        if skills_section:
 91            prompt = prompt + "\n\n" + skills_section
 92
 93        return prompt
 94
 95    @abstractmethod
 96    def build_base_prompt(self) -> str:
 97        """Build and return the complete prompt string.
 98
 99        Returns:
100            str: The constructed prompt.
101        """
102        pass
103
104    def chain_of_thought_prompt(self) -> str | None:
105        """Build and return the chain of thought prompt string.
106
107        Returns:
108            str: The constructed chain of thought prompt.
109        """
110        return None
111
112    def build_prompt_for_ui(self, skills: list[Skill] | None = None) -> str:
113        """Build a prompt for the UI. It includes additional instructions (like chain of thought), even if they are passed to the model in stages.
114
115        Designed for end-user consumption, not for model consumption.
116
117        Args:
118            skills: Optional list of skills to include in the prompt display.
119
120        Returns:
121            str: The constructed prompt string.
122        """
123        base_prompt = self.build_prompt(include_json_instructions=False, skills=skills)
124        cot_prompt = self.chain_of_thought_prompt()
125        if cot_prompt:
126            base_prompt += "\n\n# Thinking Instructions\n\n" + cot_prompt
127        return base_prompt
128
129
class SimplePromptBuilder(BasePromptBuilder):
    """Combines the task instruction with a numbered list of requirements."""

    def build_base_prompt(self) -> str:
        """Return the instruction, plus numbered requirements when present.

        Returns:
            str: The constructed prompt string.
        """
        prompt = self.task.instruction

        requirements = self.task.requirements
        if requirements:
            prompt += (
                "\n\nYour response should respect the following requirements:\n"
            )
            # Render requirements as a numbered list: 1) ...\n2) ...
            for number, requirement in enumerate(requirements, start=1):
                prompt += f"{number}) {requirement.instruction}\n"

        return prompt
150
151
class MultiShotPromptBuilder(BasePromptBuilder):
    """Prompt builder that appends dataset examples to the prompt."""

    @classmethod
    def example_count(cls) -> int:
        """Return the maximum number of examples to include in the prompt.

        Returns:
            int: The maximum number of examples (default 25).
        """
        return 25

    def build_instruction_and_requirements(self) -> str:
        """Return the '# Instruction' and optional '# Requirements' sections.

        Returns:
            str: The instruction and requirements sections.
        """
        sections = f"# Instruction\n\n{self.task.instruction}\n\n"

        requirements = self.task.requirements
        if requirements:
            sections += "# Requirements\n\nYour response should respect the following requirements:\n"
            for number, requirement in enumerate(requirements, start=1):
                sections += f"{number}) {requirement.instruction}\n"
            sections += "\n"

        return sections

    def build_base_prompt(self) -> str:
        """Return instruction/requirements followed by example sections.

        Returns:
            str: The constructed prompt string with examples.
        """
        prompt = self.build_instruction_and_requirements()

        examples = self.collect_examples()
        if not examples:
            return prompt

        prompt += "# Example Outputs\n\n"
        for index, example in enumerate(examples):
            prompt += self.prompt_section_for_example(index, example)

        return prompt

    def prompt_section_for_example(self, index: int, example: TaskRun) -> str:
        """Format one example; a repaired output wins over the original one."""
        output = example.repaired_output or example.output
        return f"## Example {index + 1}\n\nInput: {example.input}\nOutput: {output.output}\n\n"

    def collect_examples(self) -> list[TaskRun]:
        """Pick up to example_count() runs: repaired runs first, then top-rated."""
        limit = self.__class__.example_count()
        runs = self.task.runs(readonly=True)

        # Repaired runs are human-corrected, so they make the best examples.
        chosen: list[TaskRun] = []
        for run in runs:
            if len(chosen) >= limit:
                break
            if run.repaired_output is not None:
                chosen.append(run)

        # Fill the remainder with high-quality rated runs (minimum is
        # "high_quality", 4 stars on the star scale), best rating first.
        # Repaired runs were already consumed above, so exclude them here.
        rated = [
            run
            for run in runs
            if run.output.rating is not None
            and run.output.rating.value is not None
            and run.output.rating.is_high_quality()
            and run.repaired_output is None
        ]
        rated.sort(
            key=lambda run: (run.output.rating and run.output.rating.value) or 0,
            reverse=True,
        )
        for run in rated:
            if len(chosen) >= limit:
                break
            chosen.append(run)
        return chosen
234
235
class FewShotPromptBuilder(MultiShotPromptBuilder):
    """Multi-shot builder capped at a small handful of examples."""

    @classmethod
    def example_count(cls) -> int:
        """Return the maximum number of examples to include in the prompt.

        Returns:
            int: The maximum number of examples (4).
        """
        return 4
247
248
class CustomExamplePromptBuilder(FewShotPromptBuilder):
    """Few-shot builder fed with caller-supplied examples, not dataset runs."""

    def __init__(self, task: Task, examples: list[PromptExample] | None = None):
        """Store the custom examples (defaults to an empty list)."""
        super().__init__(task)
        self._custom_examples = examples or []

    def collect_examples(self) -> list[TaskRun]:
        """Dataset collection is disabled; custom examples are rendered directly."""
        return []

    def build_base_prompt(self) -> str:
        """Return instruction/requirements followed by the custom examples."""
        prompt = self.build_instruction_and_requirements()

        if self._custom_examples:
            prompt += "# Example Outputs\n\n"
            for index, example in enumerate(self._custom_examples):
                prompt += (
                    f"## Example {index + 1}\n\n"
                    f"Input: {example.input}\n"
                    f"Output: {example.output}\n\n"
                )

        return prompt
270
271
class RepairsPromptBuilder(MultiShotPromptBuilder):
    """Multi-shot builder whose examples walk through the repair process.

    Each repaired example shows the insufficient initial output, the repair
    instructions describing what was wrong, and the fixed output.
    """

    def prompt_section_for_example(self, index: int, example: TaskRun) -> str:
        """Render a before/after repair section for one example.

        Falls back to the plain example format when any repair data is missing.
        """
        has_repair_data = (
            example.repaired_output
            and example.repair_instructions
            and example.repaired_output.output
        )
        if not has_repair_data:
            return super().prompt_section_for_example(index, example)

        return (
            f"## Example {index + 1}\n\nInput: {example.input}\n\n"
            f"Initial Output Which Was Insufficient: {example.output.output}\n\n"
            f"Instructions On How to Improve the Initial Output: {example.repair_instructions}\n\n"
            f"Repaired Output Which is Sufficient: {example.repaired_output.output}\n\n"
        )
292
293
def chain_of_thought_prompt(task: Task) -> str:
    """Return the task's thinking instruction, or a generic step-by-step one.

    Standard implementation shared by the chain-of-thought prompt builders.

    Returns:
        str: The chain of thought prompt text.
    """
    # An empty/None thinking_instruction falls back to the generic default.
    return task.thinking_instruction or "Think step by step, explaining your reasoning."
306
307
class SimpleChainOfThoughtPromptBuilder(SimplePromptBuilder):
    """Simple prompt builder augmented with chain-of-thought instructions."""

    def chain_of_thought_prompt(self) -> str | None:
        """Return the standard chain-of-thought prompt for this task."""
        return chain_of_thought_prompt(self.task)
313
314
class FewShotChainOfThoughtPromptBuilder(FewShotPromptBuilder):
    """Few-shot prompt builder augmented with chain-of-thought instructions."""

    def chain_of_thought_prompt(self) -> str | None:
        """Return the standard chain-of-thought prompt for this task."""
        return chain_of_thought_prompt(self.task)
320
321
class MultiShotChainOfThoughtPromptBuilder(MultiShotPromptBuilder):
    """Multi-shot prompt builder augmented with chain-of-thought instructions."""

    def chain_of_thought_prompt(self) -> str | None:
        """Return the standard chain-of-thought prompt for this task."""
        return chain_of_thought_prompt(self.task)
327
328
class SavedPromptBuilder(BasePromptBuilder):
    """Serves a static prompt previously saved on the task, looked up by ID."""

    def __init__(self, task: Task, prompt_id: str):
        """Resolve prompt_id against the task's saved prompts.

        Raises:
            ValueError: If no saved prompt has the given ID.
        """
        super().__init__(task)
        matches = (
            prompt for prompt in task.prompts(readonly=True) if prompt.id == prompt_id
        )
        prompt_model = next(matches, None)
        if not prompt_model:
            raise ValueError(f"Prompt ID not found: {prompt_id}")
        self.prompt_model = prompt_model

    def prompt_id(self) -> str | None:
        """Return the saved prompt's ID."""
        return self.prompt_model.id

    def build_base_prompt(self) -> str:
        """Return the saved prompt text verbatim.

        Returns:
            str: The prompt string.
        """
        return self.prompt_model.prompt

    def chain_of_thought_prompt(self) -> str | None:
        """Return the saved chain-of-thought instructions, if any."""
        return self.prompt_model.chain_of_thought_instructions
359
360
class TaskRunConfigPromptBuilder(BasePromptBuilder):
    """Serves the frozen prompt stored on a task run config."""

    def __init__(self, task: Task, run_config_prompt_id: str):
        """Parse the nested prompt ID and load the run config's stored prompt.

        Raises:
            ValueError: On a malformed ID, a task ID mismatch, an unknown run
                config ID, or a run config without a stored prompt.
        """
        parts = run_config_prompt_id.split("::")
        if len(parts) != 4:
            raise ValueError(
                f"Invalid task run config prompt ID: {run_config_prompt_id}. Expected format: 'task_run_config::[project_id]::[task_id]::[run_config_id]'."
            )

        task_id = parts[2]
        if task_id != task.id:
            raise ValueError(
                f"Task run config prompt ID: {run_config_prompt_id}. Task ID mismatch. Expected: {task.id}, got: {task_id}."
            )

        run_config_id = parts[3]
        run_config = next(
            (rc for rc in task.run_configs(readonly=True) if rc.id == run_config_id),
            None,
        )
        if not run_config:
            raise ValueError(
                f"Task run config ID not found: {run_config_id} for prompt id {run_config_prompt_id}"
            )
        if run_config.prompt is None:
            raise ValueError(
                f"Task run config ID {run_config_id} does not have a stored prompt. Used as prompt id {run_config_prompt_id}"
            )

        # Cache the stored prompt pieces before handing off to the base class.
        self.prompt = run_config.prompt.prompt
        self.cot_prompt = run_config.prompt.chain_of_thought_instructions
        self.id = run_config_prompt_id

        super().__init__(task)

    def prompt_id(self) -> str | None:
        """Return the full nested run-config prompt ID."""
        return self.id

    def build_base_prompt(self) -> str:
        """Return the stored prompt text."""
        return self.prompt

    def chain_of_thought_prompt(self) -> str | None:
        """Return the stored chain-of-thought instructions, if any."""
        return self.cot_prompt
410
411
class FineTunePromptBuilder(BasePromptBuilder):
    """Serves the system message saved with a fine-tuned model."""

    def __init__(self, task: Task, nested_fine_tune_id: str):
        """Parse the nested fine-tune ID and look up the fine-tune on the task.

        Raises:
            ValueError: On a malformed ID or an unknown fine-tune ID.
        """
        super().__init__(task)

        # Nested IDs use the project_id::task_id::fine_tune_id layout.
        self.full_fine_tune_id = nested_fine_tune_id
        parts = nested_fine_tune_id.split("::")
        if len(parts) != 3:
            raise ValueError(
                f"Invalid fine-tune ID format. Expected 'project_id::task_id::fine_tune_id', got: {nested_fine_tune_id}"
            )
        fine_tune_id = parts[2]

        fine_tune_model = next(
            (ft for ft in task.finetunes(readonly=True) if ft.id == fine_tune_id),
            None,
        )
        if not fine_tune_model:
            raise ValueError(f"Fine-tune ID not found: {fine_tune_id}")
        self.fine_tune_model = fine_tune_model

    def prompt_id(self) -> str | None:
        """Return the full nested fine-tune ID."""
        return self.full_fine_tune_id

    def build_base_prompt(self) -> str:
        """Return the fine-tune's system message."""
        return self.fine_tune_model.system_message

    def chain_of_thought_prompt(self) -> str | None:
        """Return the fine-tune's thinking instructions, if any."""
        return self.fine_tune_model.thinking_instructions
447
448
449# Our UI has some names that are not the same as the class names, which also hint parameters.
450def prompt_builder_from_id(prompt_id: PromptId, task: Task) -> BasePromptBuilder:
451    """Convert a name used in the UI to the corresponding prompt builder class.
452
453    Args:
454        prompt_id (PromptId): The prompt ID.
455
456    Returns:
457        type[BasePromptBuilder]: The corresponding prompt builder class.
458
459    Raises:
460        ValueError: If the UI name is not recognized.
461    """
462
463    # Saved prompts are prefixed with "id::"
464    if prompt_id.startswith("id::"):
465        prompt_id = prompt_id[4:]
466        return SavedPromptBuilder(task, prompt_id)
467
468    # Task run config prompts are prefixed with "task_run_config::"
469    # task_run_config::[project_id]::[task_id]::[run_config_id]
470    if prompt_id.startswith("task_run_config::"):
471        return TaskRunConfigPromptBuilder(task, prompt_id)
472
473    # Fine-tune prompts are prefixed with "fine_tune_prompt::"
474    if prompt_id.startswith("fine_tune_prompt::"):
475        prompt_id = prompt_id[18:]
476        return FineTunePromptBuilder(task, prompt_id)
477
478    # Check if the prompt_id matches any enum value
479    if prompt_id not in [member.value for member in PromptGenerators]:
480        raise ValueError(f"Unknown prompt generator: {prompt_id}")
481    typed_prompt_generator = PromptGenerators(prompt_id)
482
483    match typed_prompt_generator:
484        case PromptGenerators.SIMPLE:
485            return SimplePromptBuilder(task)
486        case PromptGenerators.FEW_SHOT:
487            return FewShotPromptBuilder(task)
488        case PromptGenerators.MULTI_SHOT:
489            return MultiShotPromptBuilder(task)
490        case PromptGenerators.REPAIRS:
491            return RepairsPromptBuilder(task)
492        case PromptGenerators.SIMPLE_CHAIN_OF_THOUGHT:
493            return SimpleChainOfThoughtPromptBuilder(task)
494        case PromptGenerators.FEW_SHOT_CHAIN_OF_THOUGHT:
495            return FewShotChainOfThoughtPromptBuilder(task)
496        case PromptGenerators.MULTI_SHOT_CHAIN_OF_THOUGHT:
497            return MultiShotChainOfThoughtPromptBuilder(task)
498        case _:
499            # Type checking will find missing cases
500            raise_exhaustive_enum_error(typed_prompt_generator)
@dataclass
class PromptExample:
15@dataclass
16class PromptExample:
17    """A simple input/output example for use in prompt building."""
18
19    input: str
20    output: str

A simple input/output example for use in prompt building.

PromptExample(input: str, output: str)
input: str
output: str
def build_skills_prompt_section(skills: list[kiln_ai.datamodel.Skill] | None) -> str | None:
23def build_skills_prompt_section(skills: list[Skill] | None) -> str | None:
24    """Build a system prompt section listing available skills.
25
26    This is a standalone function so both the inference adapter and the
27    fine-tune pipeline can include the same skills instructions in the
28    system prompt.
29    """
30    if not skills:
31        return None
32
33    skill_lines = "\n".join(f"- `{s.name}`\n  {s.description}" for s in skills)
34
35    return (
36        "# Skills\n\n"
37        "When a Skill is relevant, load it with `skill(name)` and follow its instructions.\n"
38        "Load additional Skill resources only if needed with `skill(name, resource)`.\n"
39        "## Available Skills\n\n"
40        f"{skill_lines}"
41    )

Build a system prompt section listing available skills.

This is a standalone function so both the inference adapter and the fine-tune pipeline can include the same skills instructions in the system prompt.

class BasePromptBuilder:
 44class BasePromptBuilder(metaclass=ABCMeta):
 45    """Base class for building prompts from tasks.
 46
 47    Provides the core interface and basic functionality for prompt builders.
 48    """
 49
 50    def __init__(self, task: Task):
 51        """Initialize the prompt builder with a task.
 52
 53        Args:
 54            task (Task): The task containing instructions and requirements.
 55        """
 56        self.task = task
 57
 58    def prompt_id(self) -> str | None:
 59        """Returns the ID of the prompt, scoped to this builder.
 60
 61        Returns:
 62            str | None: The ID of the prompt, or None if not set.
 63        """
 64        return None
 65
 66    def build_prompt(
 67        self,
 68        include_json_instructions: bool,
 69        skills: list[Skill] | None = None,
 70    ) -> str:
 71        """Build and return the complete prompt string.
 72
 73        Args:
 74            include_json_instructions: Whether to include JSON schema formatting instructions.
 75            skills: Optional list of skills to include in the prompt. When provided,
 76                a skills instruction section is appended so the model knows which
 77                skills are available.
 78
 79        Returns:
 80            str: The constructed prompt.
 81        """
 82        prompt = self.build_base_prompt()
 83
 84        if include_json_instructions and self.task.output_schema():
 85            prompt = (
 86                prompt
 87                + f"\n\n# Format Instructions\n\nReturn a JSON object conforming to the following schema:\n```\n{self.task.output_schema()}\n```"
 88            )
 89
 90        skills_section = build_skills_prompt_section(skills)
 91        if skills_section:
 92            prompt = prompt + "\n\n" + skills_section
 93
 94        return prompt
 95
 96    @abstractmethod
 97    def build_base_prompt(self) -> str:
 98        """Build and return the complete prompt string.
 99
100        Returns:
101            str: The constructed prompt.
102        """
103        pass
104
105    def chain_of_thought_prompt(self) -> str | None:
106        """Build and return the chain of thought prompt string.
107
108        Returns:
109            str: The constructed chain of thought prompt.
110        """
111        return None
112
113    def build_prompt_for_ui(self, skills: list[Skill] | None = None) -> str:
114        """Build a prompt for the UI. It includes additional instructions (like chain of thought), even if they are passed to the model in stages.
115
116        Designed for end-user consumption, not for model consumption.
117
118        Args:
119            skills: Optional list of skills to include in the prompt display.
120
121        Returns:
122            str: The constructed prompt string.
123        """
124        base_prompt = self.build_prompt(include_json_instructions=False, skills=skills)
125        cot_prompt = self.chain_of_thought_prompt()
126        if cot_prompt:
127            base_prompt += "\n\n# Thinking Instructions\n\n" + cot_prompt
128        return base_prompt

Base class for building prompts from tasks.

Provides the core interface and basic functionality for prompt builders.

BasePromptBuilder(task: kiln_ai.datamodel.Task)
50    def __init__(self, task: Task):
51        """Initialize the prompt builder with a task.
52
53        Args:
54            task (Task): The task containing instructions and requirements.
55        """
56        self.task = task

Initialize the prompt builder with a task.

Args: task (Task): The task containing instructions and requirements.

task
def prompt_id(self) -> str | None:
58    def prompt_id(self) -> str | None:
59        """Returns the ID of the prompt, scoped to this builder.
60
61        Returns:
62            str | None: The ID of the prompt, or None if not set.
63        """
64        return None

Returns the ID of the prompt, scoped to this builder.

Returns: str | None: The ID of the prompt, or None if not set.

def build_prompt( self, include_json_instructions: bool, skills: list[kiln_ai.datamodel.Skill] | None = None) -> str:
66    def build_prompt(
67        self,
68        include_json_instructions: bool,
69        skills: list[Skill] | None = None,
70    ) -> str:
71        """Build and return the complete prompt string.
72
73        Args:
74            include_json_instructions: Whether to include JSON schema formatting instructions.
75            skills: Optional list of skills to include in the prompt. When provided,
76                a skills instruction section is appended so the model knows which
77                skills are available.
78
79        Returns:
80            str: The constructed prompt.
81        """
82        prompt = self.build_base_prompt()
83
84        if include_json_instructions and self.task.output_schema():
85            prompt = (
86                prompt
87                + f"\n\n# Format Instructions\n\nReturn a JSON object conforming to the following schema:\n```\n{self.task.output_schema()}\n```"
88            )
89
90        skills_section = build_skills_prompt_section(skills)
91        if skills_section:
92            prompt = prompt + "\n\n" + skills_section
93
94        return prompt

Build and return the complete prompt string.

Args: include_json_instructions: Whether to include JSON schema formatting instructions. skills: Optional list of skills to include in the prompt. When provided, a skills instruction section is appended so the model knows which skills are available.

Returns: str: The constructed prompt.

@abstractmethod
def build_base_prompt(self) -> str:
 96    @abstractmethod
 97    def build_base_prompt(self) -> str:
 98        """Build and return the complete prompt string.
 99
100        Returns:
101            str: The constructed prompt.
102        """
103        pass

Build and return the complete prompt string.

Returns: str: The constructed prompt.

def chain_of_thought_prompt(self) -> str | None:
105    def chain_of_thought_prompt(self) -> str | None:
106        """Build and return the chain of thought prompt string.
107
108        Returns:
109            str: The constructed chain of thought prompt.
110        """
111        return None

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

def build_prompt_for_ui(self, skills: list[kiln_ai.datamodel.Skill] | None = None) -> str:
113    def build_prompt_for_ui(self, skills: list[Skill] | None = None) -> str:
114        """Build a prompt for the UI. It includes additional instructions (like chain of thought), even if they are passed to the model in stages.
115
116        Designed for end-user consumption, not for model consumption.
117
118        Args:
119            skills: Optional list of skills to include in the prompt display.
120
121        Returns:
122            str: The constructed prompt string.
123        """
124        base_prompt = self.build_prompt(include_json_instructions=False, skills=skills)
125        cot_prompt = self.chain_of_thought_prompt()
126        if cot_prompt:
127            base_prompt += "\n\n# Thinking Instructions\n\n" + cot_prompt
128        return base_prompt

Build a prompt for the UI. It includes additional instructions (like chain of thought), even if they are passed to the model in stages.

Designed for end-user consumption, not for model consumption.

Args: skills: Optional list of skills to include in the prompt display.

Returns: str: The constructed prompt string.

class SimplePromptBuilder(BasePromptBuilder):
131class SimplePromptBuilder(BasePromptBuilder):
132    """A basic prompt builder that combines task instruction with requirements."""
133
134    def build_base_prompt(self) -> str:
135        """Build a simple prompt with instruction and requirements.
136
137        Returns:
138            str: The constructed prompt string.
139        """
140        base_prompt = self.task.instruction
141
142        if len(self.task.requirements) > 0:
143            base_prompt += (
144                "\n\nYour response should respect the following requirements:\n"
145            )
146            # iterate requirements, formatting them in a numbered list like 1) task.instruction\n2)...
147            for i, requirement in enumerate(self.task.requirements):
148                base_prompt += f"{i + 1}) {requirement.instruction}\n"
149
150        return base_prompt

A basic prompt builder that combines task instruction with requirements.

def build_base_prompt(self) -> str:
134    def build_base_prompt(self) -> str:
135        """Build a simple prompt with instruction and requirements.
136
137        Returns:
138            str: The constructed prompt string.
139        """
140        base_prompt = self.task.instruction
141
142        if len(self.task.requirements) > 0:
143            base_prompt += (
144                "\n\nYour response should respect the following requirements:\n"
145            )
146            # iterate requirements, formatting them in a numbered list like 1) task.instruction\n2)...
147            for i, requirement in enumerate(self.task.requirements):
148                base_prompt += f"{i + 1}) {requirement.instruction}\n"
149
150        return base_prompt

Build a simple prompt with instruction and requirements.

Returns: str: The constructed prompt string.

class MultiShotPromptBuilder(BasePromptBuilder):
153class MultiShotPromptBuilder(BasePromptBuilder):
154    """A prompt builder that includes multiple examples in the prompt."""
155
156    @classmethod
157    def example_count(cls) -> int:
158        """Get the maximum number of examples to include in the prompt.
159
160        Returns:
161            int: The maximum number of examples (default 25).
162        """
163        return 25
164
165    def build_instruction_and_requirements(self) -> str:
166        """Build the instruction and requirements section of the prompt.
167
168        Returns:
169            str: The instruction and requirements sections.
170        """
171        base_prompt = f"# Instruction\n\n{self.task.instruction}\n\n"
172
173        if len(self.task.requirements) > 0:
174            base_prompt += "# Requirements\n\nYour response should respect the following requirements:\n"
175            for i, requirement in enumerate(self.task.requirements):
176                base_prompt += f"{i + 1}) {requirement.instruction}\n"
177            base_prompt += "\n"
178
179        return base_prompt
180
181    def build_base_prompt(self) -> str:
182        """Build a prompt with instruction, requirements, and multiple examples.
183
184        Returns:
185            str: The constructed prompt string with examples.
186        """
187        base_prompt = self.build_instruction_and_requirements()
188
189        valid_examples = self.collect_examples()
190
191        if len(valid_examples) == 0:
192            return base_prompt
193
194        base_prompt += "# Example Outputs\n\n"
195        for i, example in enumerate(valid_examples):
196            base_prompt += self.prompt_section_for_example(i, example)
197
198        return base_prompt
199
200    def prompt_section_for_example(self, index: int, example: TaskRun) -> str:
201        # Prefer repaired output if it exists, otherwise use the regular output
202        output = example.repaired_output or example.output
203        return f"## Example {index + 1}\n\nInput: {example.input}\nOutput: {output.output}\n\n"
204
205    def collect_examples(self) -> list[TaskRun]:
206        valid_examples: list[TaskRun] = []
207        runs = self.task.runs(readonly=True)
208
209        # first pass, we look for repaired outputs. These are the best examples.
210        for run in runs:
211            if len(valid_examples) >= self.__class__.example_count():
212                break
213            if run.repaired_output is not None:
214                valid_examples.append(run)
215
216        # second pass, we look for high quality outputs (rating based)
217        # Minimum is "high_quality" (4 star in star rating scale), then sort by rating
218        # exclude repaired outputs as they were used above
219        runs_with_rating = [
220            run
221            for run in runs
222            if run.output.rating is not None
223            and run.output.rating.value is not None
224            and run.output.rating.is_high_quality()
225            and run.repaired_output is None
226        ]
227        runs_with_rating.sort(
228            key=lambda x: (x.output.rating and x.output.rating.value) or 0, reverse=True
229        )
230        for run in runs_with_rating:
231            if len(valid_examples) >= self.__class__.example_count():
232                break
233            valid_examples.append(run)
234        return valid_examples

A prompt builder that includes multiple examples in the prompt.

@classmethod
def example_count(cls) -> int:
156    @classmethod
157    def example_count(cls) -> int:
158        """Get the maximum number of examples to include in the prompt.
159
160        Returns:
161            int: The maximum number of examples (default 25).
162        """
163        return 25

Get the maximum number of examples to include in the prompt.

Returns: int: The maximum number of examples (default 25).

def build_instruction_and_requirements(self) -> str:
165    def build_instruction_and_requirements(self) -> str:
166        """Build the instruction and requirements section of the prompt.
167
168        Returns:
169            str: The instruction and requirements sections.
170        """
171        base_prompt = f"# Instruction\n\n{self.task.instruction}\n\n"
172
173        if len(self.task.requirements) > 0:
174            base_prompt += "# Requirements\n\nYour response should respect the following requirements:\n"
175            for i, requirement in enumerate(self.task.requirements):
176                base_prompt += f"{i + 1}) {requirement.instruction}\n"
177            base_prompt += "\n"
178
179        return base_prompt

Build the instruction and requirements section of the prompt.

Returns: str: The instruction and requirements sections.

def build_base_prompt(self) -> str:
181    def build_base_prompt(self) -> str:
182        """Build a prompt with instruction, requirements, and multiple examples.
183
184        Returns:
185            str: The constructed prompt string with examples.
186        """
187        base_prompt = self.build_instruction_and_requirements()
188
189        valid_examples = self.collect_examples()
190
191        if len(valid_examples) == 0:
192            return base_prompt
193
194        base_prompt += "# Example Outputs\n\n"
195        for i, example in enumerate(valid_examples):
196            base_prompt += self.prompt_section_for_example(i, example)
197
198        return base_prompt

Build a prompt with instruction, requirements, and multiple examples.

Returns: str: The constructed prompt string with examples.

def prompt_section_for_example(self, index: int, example: kiln_ai.datamodel.TaskRun) -> str:
200    def prompt_section_for_example(self, index: int, example: TaskRun) -> str:
201        # Prefer repaired output if it exists, otherwise use the regular output
202        output = example.repaired_output or example.output
203        return f"## Example {index + 1}\n\nInput: {example.input}\nOutput: {output.output}\n\n"
def collect_examples(self) -> list[kiln_ai.datamodel.TaskRun]:
205    def collect_examples(self) -> list[TaskRun]:
206        valid_examples: list[TaskRun] = []
207        runs = self.task.runs(readonly=True)
208
209        # first pass, we look for repaired outputs. These are the best examples.
210        for run in runs:
211            if len(valid_examples) >= self.__class__.example_count():
212                break
213            if run.repaired_output is not None:
214                valid_examples.append(run)
215
216        # second pass, we look for high quality outputs (rating based)
217        # Minimum is "high_quality" (4 star in star rating scale), then sort by rating
218        # exclude repaired outputs as they were used above
219        runs_with_rating = [
220            run
221            for run in runs
222            if run.output.rating is not None
223            and run.output.rating.value is not None
224            and run.output.rating.is_high_quality()
225            and run.repaired_output is None
226        ]
227        runs_with_rating.sort(
228            key=lambda x: (x.output.rating and x.output.rating.value) or 0, reverse=True
229        )
230        for run in runs_with_rating:
231            if len(valid_examples) >= self.__class__.example_count():
232                break
233            valid_examples.append(run)
234        return valid_examples
class FewShotPromptBuilder(MultiShotPromptBuilder):
237class FewShotPromptBuilder(MultiShotPromptBuilder):
238    """A prompt builder that includes a small number of examples in the prompt."""
239
240    @classmethod
241    def example_count(cls) -> int:
242        """Get the maximum number of examples to include in the prompt.
243
244        Returns:
245            int: The maximum number of examples (4).
246        """
247        return 4

A prompt builder that includes a small number of examples in the prompt.

@classmethod
def example_count(cls) -> int:
240    @classmethod
241    def example_count(cls) -> int:
242        """Get the maximum number of examples to include in the prompt.
243
244        Returns:
245            int: The maximum number of examples (4).
246        """
247        return 4

Get the maximum number of examples to include in the prompt.

Returns: int: The maximum number of examples (4).

class CustomExamplePromptBuilder(FewShotPromptBuilder):
250class CustomExamplePromptBuilder(FewShotPromptBuilder):
251    """A prompt builder that uses custom examples instead of collecting from the dataset."""
252
253    def __init__(self, task: Task, examples: list[PromptExample] | None = None):
254        super().__init__(task)
255        self._custom_examples = examples or []
256
257    def collect_examples(self) -> list[TaskRun]:
258        """Override to return an empty list - we handle examples separately."""
259        return []
260
261    def build_base_prompt(self) -> str:
262        """Build a prompt with instruction, requirements, and custom examples."""
263        base_prompt = self.build_instruction_and_requirements()
264
265        if self._custom_examples:
266            base_prompt += "# Example Outputs\n\n"
267            for i, example in enumerate(self._custom_examples):
268                base_prompt += f"## Example {i + 1}\n\nInput: {example.input}\nOutput: {example.output}\n\n"
269
270        return base_prompt

A prompt builder that uses custom examples instead of collecting from the dataset.

CustomExamplePromptBuilder( task: kiln_ai.datamodel.Task, examples: list[PromptExample] | None = None)
253    def __init__(self, task: Task, examples: list[PromptExample] | None = None):
254        super().__init__(task)
255        self._custom_examples = examples or []

Initialize the prompt builder with a task and optional custom examples.

Args: task (Task): The task containing instructions and requirements. examples (list[PromptExample] | None): Custom input/output examples to include in the prompt; defaults to an empty list.

def collect_examples(self) -> list[kiln_ai.datamodel.TaskRun]:
257    def collect_examples(self) -> list[TaskRun]:
258        """Override to return an empty list - we handle examples separately."""
259        return []

Override to return an empty list - we handle examples separately.

def build_base_prompt(self) -> str:
261    def build_base_prompt(self) -> str:
262        """Build a prompt with instruction, requirements, and custom examples."""
263        base_prompt = self.build_instruction_and_requirements()
264
265        if self._custom_examples:
266            base_prompt += "# Example Outputs\n\n"
267            for i, example in enumerate(self._custom_examples):
268                base_prompt += f"## Example {i + 1}\n\nInput: {example.input}\nOutput: {example.output}\n\n"
269
270        return base_prompt

Build a prompt with instruction, requirements, and custom examples.

class RepairsPromptBuilder(MultiShotPromptBuilder):
273class RepairsPromptBuilder(MultiShotPromptBuilder):
274    """A prompt builder that includes multiple examples in the prompt, including repaired instructions describing what was wrong, and how it was fixed."""
275
276    def prompt_section_for_example(self, index: int, example: TaskRun) -> str:
277        if (
278            not example.repaired_output
279            or not example.repair_instructions
280            or not example.repaired_output.output
281        ):
282            return super().prompt_section_for_example(index, example)
283
284        prompt_section = f"## Example {index + 1}\n\nInput: {example.input}\n\n"
285        prompt_section += (
286            f"Initial Output Which Was Insufficient: {example.output.output}\n\n"
287        )
288        prompt_section += f"Instructions On How to Improve the Initial Output: {example.repair_instructions}\n\n"
289        prompt_section += (
290            f"Repaired Output Which is Sufficient: {example.repaired_output.output}\n\n"
291        )
292        return prompt_section

A prompt builder that includes multiple examples in the prompt, including repaired instructions describing what was wrong, and how it was fixed.

def prompt_section_for_example(self, index: int, example: kiln_ai.datamodel.TaskRun) -> str:
276    def prompt_section_for_example(self, index: int, example: TaskRun) -> str:
277        if (
278            not example.repaired_output
279            or not example.repair_instructions
280            or not example.repaired_output.output
281        ):
282            return super().prompt_section_for_example(index, example)
283
284        prompt_section = f"## Example {index + 1}\n\nInput: {example.input}\n\n"
285        prompt_section += (
286            f"Initial Output Which Was Insufficient: {example.output.output}\n\n"
287        )
288        prompt_section += f"Instructions On How to Improve the Initial Output: {example.repair_instructions}\n\n"
289        prompt_section += (
290            f"Repaired Output Which is Sufficient: {example.repaired_output.output}\n\n"
291        )
292        return prompt_section
def chain_of_thought_prompt(task: kiln_ai.datamodel.Task) -> str:
295def chain_of_thought_prompt(task: Task) -> str:
296    """Standard implementation to build and return the chain of thought prompt string.
297
298    Returns:
299        str: The constructed chain of thought prompt.
300    """
301
302    cot_instruction = task.thinking_instruction
303    if not cot_instruction:
304        cot_instruction = "Think step by step, explaining your reasoning."
305
306    return cot_instruction

Standard implementation to build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

class SimpleChainOfThoughtPromptBuilder(SimplePromptBuilder):
309class SimpleChainOfThoughtPromptBuilder(SimplePromptBuilder):
310    """A prompt builder that includes a chain of thought prompt on top of the simple prompt."""
311
312    def chain_of_thought_prompt(self) -> str | None:
313        return chain_of_thought_prompt(self.task)

A prompt builder that includes a chain of thought prompt on top of the simple prompt.

def chain_of_thought_prompt(self) -> str | None:
312    def chain_of_thought_prompt(self) -> str | None:
313        return chain_of_thought_prompt(self.task)

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

class FewShotChainOfThoughtPromptBuilder(FewShotPromptBuilder):
316class FewShotChainOfThoughtPromptBuilder(FewShotPromptBuilder):
317    """A prompt builder that includes a chain of thought prompt on top of the few shot prompt."""
318
319    def chain_of_thought_prompt(self) -> str | None:
320        return chain_of_thought_prompt(self.task)

A prompt builder that includes a chain of thought prompt on top of the few shot prompt.

def chain_of_thought_prompt(self) -> str | None:
319    def chain_of_thought_prompt(self) -> str | None:
320        return chain_of_thought_prompt(self.task)

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

class MultiShotChainOfThoughtPromptBuilder(MultiShotPromptBuilder):
323class MultiShotChainOfThoughtPromptBuilder(MultiShotPromptBuilder):
324    """A prompt builder that includes a chain of thought prompt on top of the multi shot prompt."""
325
326    def chain_of_thought_prompt(self) -> str | None:
327        return chain_of_thought_prompt(self.task)

A prompt builder that includes a chain of thought prompt on top of the multi shot prompt.

def chain_of_thought_prompt(self) -> str | None:
326    def chain_of_thought_prompt(self) -> str | None:
327        return chain_of_thought_prompt(self.task)

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

class SavedPromptBuilder(BasePromptBuilder):
330class SavedPromptBuilder(BasePromptBuilder):
331    """A prompt builder that looks up a static prompt."""
332
333    def __init__(self, task: Task, prompt_id: str):
334        super().__init__(task)
335        prompt_model = next(
336            (
337                prompt
338                for prompt in task.prompts(readonly=True)
339                if prompt.id == prompt_id
340            ),
341            None,
342        )
343        if not prompt_model:
344            raise ValueError(f"Prompt ID not found: {prompt_id}")
345        self.prompt_model = prompt_model
346
347    def prompt_id(self) -> str | None:
348        return self.prompt_model.id
349
350    def build_base_prompt(self) -> str:
351        """Returns a saved prompt.
352
353        Returns:
354            str: The prompt string.
355        """
356        return self.prompt_model.prompt
357
358    def chain_of_thought_prompt(self) -> str | None:
359        return self.prompt_model.chain_of_thought_instructions

A prompt builder that looks up a static prompt.

SavedPromptBuilder(task: kiln_ai.datamodel.Task, prompt_id: str)
333    def __init__(self, task: Task, prompt_id: str):
334        super().__init__(task)
335        prompt_model = next(
336            (
337                prompt
338                for prompt in task.prompts(readonly=True)
339                if prompt.id == prompt_id
340            ),
341            None,
342        )
343        if not prompt_model:
344            raise ValueError(f"Prompt ID not found: {prompt_id}")
345        self.prompt_model = prompt_model

Initialize the prompt builder with a task and look up the saved prompt by ID.

Args: task (Task): The task containing instructions and requirements. prompt_id (str): The ID of the saved prompt to load. Raises: ValueError: If no prompt with the given ID exists on the task.

prompt_model
def prompt_id(self) -> str | None:
347    def prompt_id(self) -> str | None:
348        return self.prompt_model.id

Returns the ID of the prompt, scoped to this builder.

Returns: str | None: The ID of the prompt, or None if not set.

def build_base_prompt(self) -> str:
350    def build_base_prompt(self) -> str:
351        """Returns a saved prompt.
352
353        Returns:
354            str: The prompt string.
355        """
356        return self.prompt_model.prompt

Returns a saved prompt.

Returns: str: The prompt string.

def chain_of_thought_prompt(self) -> str | None:
358    def chain_of_thought_prompt(self) -> str | None:
359        return self.prompt_model.chain_of_thought_instructions

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

class TaskRunConfigPromptBuilder(BasePromptBuilder):
362class TaskRunConfigPromptBuilder(BasePromptBuilder):
363    """A prompt builder that looks up a static prompt in a task run config."""
364
365    def __init__(self, task: Task, run_config_prompt_id: str):
366        parts = run_config_prompt_id.split("::")
367        if len(parts) != 4:
368            raise ValueError(
369                f"Invalid task run config prompt ID: {run_config_prompt_id}. Expected format: 'task_run_config::[project_id]::[task_id]::[run_config_id]'."
370            )
371
372        task_id = parts[2]
373        if task_id != task.id:
374            raise ValueError(
375                f"Task run config prompt ID: {run_config_prompt_id}. Task ID mismatch. Expected: {task.id}, got: {task_id}."
376            )
377
378        run_config_id = parts[3]
379        run_config = next(
380            (
381                run_config
382                for run_config in task.run_configs(readonly=True)
383                if run_config.id == run_config_id
384            ),
385            None,
386        )
387        if not run_config:
388            raise ValueError(
389                f"Task run config ID not found: {run_config_id} for prompt id {run_config_prompt_id}"
390            )
391        if run_config.prompt is None:
392            raise ValueError(
393                f"Task run config ID {run_config_id} does not have a stored prompt. Used as prompt id {run_config_prompt_id}"
394            )
395
396        # Load the prompt from the model
397        self.prompt = run_config.prompt.prompt
398        self.cot_prompt = run_config.prompt.chain_of_thought_instructions
399        self.id = run_config_prompt_id
400
401        super().__init__(task)
402
403    def prompt_id(self) -> str | None:
404        return self.id
405
406    def build_base_prompt(self) -> str:
407        return self.prompt
408
409    def chain_of_thought_prompt(self) -> str | None:
410        return self.cot_prompt

A prompt builder that looks up a static prompt in a task run config.

TaskRunConfigPromptBuilder(task: kiln_ai.datamodel.Task, run_config_prompt_id: str)
365    def __init__(self, task: Task, run_config_prompt_id: str):
366        parts = run_config_prompt_id.split("::")
367        if len(parts) != 4:
368            raise ValueError(
369                f"Invalid task run config prompt ID: {run_config_prompt_id}. Expected format: 'task_run_config::[project_id]::[task_id]::[run_config_id]'."
370            )
371
372        task_id = parts[2]
373        if task_id != task.id:
374            raise ValueError(
375                f"Task run config prompt ID: {run_config_prompt_id}. Task ID mismatch. Expected: {task.id}, got: {task_id}."
376            )
377
378        run_config_id = parts[3]
379        run_config = next(
380            (
381                run_config
382                for run_config in task.run_configs(readonly=True)
383                if run_config.id == run_config_id
384            ),
385            None,
386        )
387        if not run_config:
388            raise ValueError(
389                f"Task run config ID not found: {run_config_id} for prompt id {run_config_prompt_id}"
390            )
391        if run_config.prompt is None:
392            raise ValueError(
393                f"Task run config ID {run_config_id} does not have a stored prompt. Used as prompt id {run_config_prompt_id}"
394            )
395
396        # Load the prompt from the model
397        self.prompt = run_config.prompt.prompt
398        self.cot_prompt = run_config.prompt.chain_of_thought_instructions
399        self.id = run_config_prompt_id
400
401        super().__init__(task)

Initialize the prompt builder from a task run config prompt ID.

Args: task (Task): The task containing instructions and requirements. run_config_prompt_id (str): ID in the format 'task_run_config::[project_id]::[task_id]::[run_config_id]'. Raises: ValueError: If the ID is malformed, the task ID does not match, the run config is not found, or it has no stored prompt.

prompt
cot_prompt
id
def prompt_id(self) -> str | None:
403    def prompt_id(self) -> str | None:
404        return self.id

Returns the ID of the prompt, scoped to this builder.

Returns: str | None: The ID of the prompt, or None if not set.

def build_base_prompt(self) -> str:
406    def build_base_prompt(self) -> str:
407        return self.prompt

Build and return the complete prompt string.

Returns: str: The constructed prompt.

def chain_of_thought_prompt(self) -> str | None:
409    def chain_of_thought_prompt(self) -> str | None:
410        return self.cot_prompt

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

class FineTunePromptBuilder(BasePromptBuilder):
413class FineTunePromptBuilder(BasePromptBuilder):
414    """A prompt builder that looks up a fine-tune prompt."""
415
416    def __init__(self, task: Task, nested_fine_tune_id: str):
417        super().__init__(task)
418
419        # IDs are in project_id::task_id::fine_tune_id format
420        self.full_fine_tune_id = nested_fine_tune_id
421        parts = nested_fine_tune_id.split("::")
422        if len(parts) != 3:
423            raise ValueError(
424                f"Invalid fine-tune ID format. Expected 'project_id::task_id::fine_tune_id', got: {nested_fine_tune_id}"
425            )
426        fine_tune_id = parts[2]
427
428        fine_tune_model = next(
429            (
430                fine_tune
431                for fine_tune in task.finetunes(readonly=True)
432                if fine_tune.id == fine_tune_id
433            ),
434            None,
435        )
436        if not fine_tune_model:
437            raise ValueError(f"Fine-tune ID not found: {fine_tune_id}")
438        self.fine_tune_model = fine_tune_model
439
440    def prompt_id(self) -> str | None:
441        return self.full_fine_tune_id
442
443    def build_base_prompt(self) -> str:
444        return self.fine_tune_model.system_message
445
446    def chain_of_thought_prompt(self) -> str | None:
447        return self.fine_tune_model.thinking_instructions

A prompt builder that looks up a fine-tune prompt.

FineTunePromptBuilder(task: kiln_ai.datamodel.Task, nested_fine_tune_id: str)
416    def __init__(self, task: Task, nested_fine_tune_id: str):
417        super().__init__(task)
418
419        # IDs are in project_id::task_id::fine_tune_id format
420        self.full_fine_tune_id = nested_fine_tune_id
421        parts = nested_fine_tune_id.split("::")
422        if len(parts) != 3:
423            raise ValueError(
424                f"Invalid fine-tune ID format. Expected 'project_id::task_id::fine_tune_id', got: {nested_fine_tune_id}"
425            )
426        fine_tune_id = parts[2]
427
428        fine_tune_model = next(
429            (
430                fine_tune
431                for fine_tune in task.finetunes(readonly=True)
432                if fine_tune.id == fine_tune_id
433            ),
434            None,
435        )
436        if not fine_tune_model:
437            raise ValueError(f"Fine-tune ID not found: {fine_tune_id}")
438        self.fine_tune_model = fine_tune_model

Initialize the prompt builder with a task and a nested fine-tune ID.

Args: task (Task): The task containing instructions and requirements. nested_fine_tune_id (str): ID in the format 'project_id::task_id::fine_tune_id'. Raises: ValueError: If the ID is malformed or the fine-tune is not found.

full_fine_tune_id
fine_tune_model
def prompt_id(self) -> str | None:
440    def prompt_id(self) -> str | None:
441        return self.full_fine_tune_id

Returns the ID of the prompt, scoped to this builder.

Returns: str | None: The ID of the prompt, or None if not set.

def build_base_prompt(self) -> str:
443    def build_base_prompt(self) -> str:
444        return self.fine_tune_model.system_message

Build and return the complete prompt string.

Returns: str: The constructed prompt.

def chain_of_thought_prompt(self) -> str | None:
446    def chain_of_thought_prompt(self) -> str | None:
447        return self.fine_tune_model.thinking_instructions

Build and return the chain of thought prompt string.

Returns: str: The constructed chain of thought prompt.

def prompt_builder_from_id( prompt_id: Annotated[str, AfterValidator(func=<function <lambda>>)], task: kiln_ai.datamodel.Task) -> BasePromptBuilder:
451def prompt_builder_from_id(prompt_id: PromptId, task: Task) -> BasePromptBuilder:
452    """Convert a name used in the UI to the corresponding prompt builder class.
453
454    Args:
455        prompt_id (PromptId): The prompt ID.
456
457    Returns:
458        type[BasePromptBuilder]: The corresponding prompt builder class.
459
460    Raises:
461        ValueError: If the UI name is not recognized.
462    """
463
464    # Saved prompts are prefixed with "id::"
465    if prompt_id.startswith("id::"):
466        prompt_id = prompt_id[4:]
467        return SavedPromptBuilder(task, prompt_id)
468
469    # Task run config prompts are prefixed with "task_run_config::"
470    # task_run_config::[project_id]::[task_id]::[run_config_id]
471    if prompt_id.startswith("task_run_config::"):
472        return TaskRunConfigPromptBuilder(task, prompt_id)
473
474    # Fine-tune prompts are prefixed with "fine_tune_prompt::"
475    if prompt_id.startswith("fine_tune_prompt::"):
476        prompt_id = prompt_id[18:]
477        return FineTunePromptBuilder(task, prompt_id)
478
479    # Check if the prompt_id matches any enum value
480    if prompt_id not in [member.value for member in PromptGenerators]:
481        raise ValueError(f"Unknown prompt generator: {prompt_id}")
482    typed_prompt_generator = PromptGenerators(prompt_id)
483
484    match typed_prompt_generator:
485        case PromptGenerators.SIMPLE:
486            return SimplePromptBuilder(task)
487        case PromptGenerators.FEW_SHOT:
488            return FewShotPromptBuilder(task)
489        case PromptGenerators.MULTI_SHOT:
490            return MultiShotPromptBuilder(task)
491        case PromptGenerators.REPAIRS:
492            return RepairsPromptBuilder(task)
493        case PromptGenerators.SIMPLE_CHAIN_OF_THOUGHT:
494            return SimpleChainOfThoughtPromptBuilder(task)
495        case PromptGenerators.FEW_SHOT_CHAIN_OF_THOUGHT:
496            return FewShotChainOfThoughtPromptBuilder(task)
497        case PromptGenerators.MULTI_SHOT_CHAIN_OF_THOUGHT:
498            return MultiShotChainOfThoughtPromptBuilder(task)
499        case _:
500            # Type checking will find missing cases
501            raise_exhaustive_enum_error(typed_prompt_generator)

Convert a prompt ID used in the UI to the corresponding prompt builder instance.

Args: prompt_id (PromptId): The prompt ID.

Returns: BasePromptBuilder: The constructed prompt builder instance.

Raises: ValueError: If the UI name is not recognized.