kiln_ai.adapters.repair.repair_task

```python
import json

from pydantic import BaseModel, Field

from kiln_ai.adapters.prompt_builders import BasePromptBuilder, prompt_builder_from_id
from kiln_ai.datamodel import Priority, Project, Task, TaskRequirement, TaskRun


# We should add evaluator rating
class RepairTaskInput(BaseModel):
    original_prompt: str
    original_input: str
    original_output: str
    evaluator_feedback: str = Field(
        min_length=1,
        description="Feedback from an evaluator on how to repair the task run.",
    )


class RepairTaskRun(Task, parent_of={}):
    def __init__(self, original_task: Task):
        # Keep the typechecker happy
        tmp_project = Project(name="Repair")
        super().__init__(
            name="Repair",
            parent=tmp_project,
            description="Repair a task run, given feedback from an evaluator about how the response can be improved.",
            instruction="You are an assistant which helps improve output from another assistant (original assistant). You'll be provided a task that the original assistant executed (prompt), \
the input it was given, and the output it generated. An evaluator has determined that the output it generated did not satisfy the task and should be improved. The evaluator will provide \
feedback describing what should be improved. Your job is to understand the evaluator's feedback and improve the response.",
            requirements=[
                TaskRequirement(
                    name="Follow Eval Feedback",
                    instruction="The evaluator's feedback is the most important thing to consider. If it conflicts with the original task instruction or prompt, prioritize the evaluator's feedback.",
                    priority=Priority.p0,
                )
            ],
            input_json_schema=json.dumps(RepairTaskInput.model_json_schema()),
            output_json_schema=original_task.output_json_schema,
        )

    @classmethod
    def _original_prompt(cls, run: TaskRun, task: Task) -> str:
        if run.output.source is None or run.output.source.properties is None:
            raise ValueError("No source properties found")

        # Get the prompt builder id. The second check is needed because this was
        # previously stored in a prompt_builder_name field, so legacy runs still use it.
        prompt_id = run.output.source.properties.get(
            "prompt_id"
        ) or run.output.source.properties.get("prompt_builder_name", None)
        if prompt_id is not None and isinstance(prompt_id, str):
            prompt_builder = prompt_builder_from_id(prompt_id, task)
            if isinstance(prompt_builder, BasePromptBuilder):
                return prompt_builder.build_prompt(include_json_instructions=False)

        raise ValueError(f"Prompt builder '{prompt_id}' is not a valid prompt builder")

    @classmethod
    def build_repair_task_input(
        cls, original_task: Task, task_run: TaskRun, evaluator_feedback: str
    ) -> RepairTaskInput:
        original_prompt = cls._original_prompt(task_run, original_task)
        return RepairTaskInput(
            original_prompt=original_prompt,
            original_input=task_run.input,
            original_output=task_run.output.output,
            evaluator_feedback=evaluator_feedback,
        )
```
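A minimal end-to-end sketch of how these pieces fit together. It assumes `original_task` is an existing Kiln `Task` and `failed_run` is a `TaskRun` whose output source recorded the prompt id used to generate it; how you then execute the repair task depends on which adapter you use, so that step is left as a comment:

```python
from kiln_ai.adapters.repair.repair_task import RepairTaskRun

# Build the repair task and its input from the failed run.
repair_task = RepairTaskRun(original_task)
repair_input = RepairTaskRun.build_repair_task_input(
    original_task=original_task,
    task_run=failed_run,
    evaluator_feedback="The summary drops the article's final caveat; include it.",
)

# Run `repair_task` with `repair_input.model_dump_json()` as its input using
# whichever adapter you normally execute Kiln tasks with; the repaired output
# is validated against the original task's output schema.
```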
class RepairTaskInput(pydantic.main.BaseModel):

Input model for the repair task. It bundles everything the repair assistant needs: the prompt the original assistant was given, the input it received, the output it produced, and the evaluator's feedback describing how to improve that output. `evaluator_feedback` must be a non-empty string (`min_length=1`).
original_prompt: str
original_input: str
original_output: str
evaluator_feedback: str
model_config: ClassVar[pydantic.config.ConfigDict] = {}

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].
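A quick sketch of constructing the model directly; the only validation beyond the field types is the `min_length=1` constraint, so empty feedback is rejected:

```python
from pydantic import ValidationError

from kiln_ai.adapters.repair.repair_task import RepairTaskInput

repair_input = RepairTaskInput(
    original_prompt="Summarize the article in two sentences.",
    original_input="<article text>",
    original_output="<model output>",
    evaluator_feedback="The second sentence is not supported by the article.",
)

try:
    RepairTaskInput(
        original_prompt="p",
        original_input="i",
        original_output="o",
        evaluator_feedback="",  # rejected: min_length=1
    )
except ValidationError as exc:
    print(exc)  # evaluator_feedback: String should have at least 1 character
```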

class RepairTaskRun(kiln_ai.datamodel.task.Task):

A `Task` that repairs another task's run. Given the original prompt, input, and output plus evaluator feedback, it instructs the model to produce an improved response, prioritizing the evaluator's feedback over the original instructions when they conflict. Its input schema is generated from `RepairTaskInput` and its output schema is copied from the original task.
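Because the input schema is generated from `RepairTaskInput.model_json_schema()`, you can inspect it directly; a small sketch, assuming `original_task` is an existing `Task`:

```python
import json

from kiln_ai.adapters.repair.repair_task import RepairTaskRun

repair_task = RepairTaskRun(original_task)
schema = json.loads(repair_task.input_json_schema)
print(sorted(schema["properties"]))
# ['evaluator_feedback', 'original_input', 'original_output', 'original_prompt']
```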

RepairTaskRun(original_task: kiln_ai.datamodel.Task)

Builds the repair task for `original_task`. The repair task's input schema is generated from `RepairTaskInput`, and its output schema is copied from the original task so the repaired response is validated against the same rules.

Raises [ValidationError][pydantic_core.ValidationError] if the resulting model data cannot be validated.
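A construction sketch illustrating the schema hand-off (again assuming an existing `original_task`):

```python
from kiln_ai.adapters.repair.repair_task import RepairTaskRun

repair_task = RepairTaskRun(original_task)

# The repair task answers in the same shape as the task it is repairing.
assert repair_task.output_json_schema == original_task.output_json_schema
```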

@classmethod
def build_repair_task_input(cls, original_task: kiln_ai.datamodel.Task, task_run: kiln_ai.datamodel.TaskRun, evaluator_feedback: str) -> RepairTaskInput:
Assembles a `RepairTaskInput` for a run that needs repair. It rebuilds the prompt that produced `task_run`'s output (looking up the prompt builder via the `prompt_id` recorded on the output's source, falling back to the legacy `prompt_builder_name` key), then packages it with the run's input, output, and the evaluator's feedback.

Raises `ValueError` if the run's output source has no properties or does not record a valid prompt builder id.
model_config = {'validate_assignment': True}

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].
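A sketch of calling it defensively; `original_task` and `task_run` are assumed to exist, and the `ValueError` branch fires when the run predates prompt tracking:

```python
from kiln_ai.adapters.repair.repair_task import RepairTaskRun

try:
    repair_input = RepairTaskRun.build_repair_task_input(
        original_task=original_task,
        task_run=task_run,
        evaluator_feedback="Tighten the tone and drop the filler sentences.",
    )
except ValueError:
    # The run's output source had no properties, or recorded neither
    # "prompt_id" nor the legacy "prompt_builder_name" key.
    repair_input = None
```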

def model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None:
Inherited from Pydantic. This function is meant to behave like a `BaseModel` method to initialise private attributes.

It takes `context` as an argument since that's what pydantic-core passes when calling it.

Args:
    self: The BaseModel instance.
    context: The context.