
pydantic_ai.direct

Methods for making imperative requests to language models with minimal abstraction.

These methods allow you to make requests to LLMs where the only abstraction is input and output schema translation so you can use all models with the same API.

These methods are thin wrappers around Model implementations.

StreamedResponseSync

Synchronous wrapper around async streaming responses, implemented by running the async producer in a background thread and exposing a synchronous iterator.

This class must be used as a context manager with the with statement.

Attributes

response

Get the current state of the response.

Type: messages.ModelResponse

model_name

Get the model name of the response.

Type: str

timestamp

Get the timestamp of the response.

Type: datetime

Methods

__iter__
def __iter__() -> Iterator[messages.ModelResponseStreamEvent]

Stream the response as an iterable of ModelResponseStreamEvents.

Returns

Iterator[messages.ModelResponseStreamEvent]

get
def get() -> messages.ModelResponse

Build a ModelResponse from the data received from the stream so far.

Returns

messages.ModelResponse

usage
def usage() -> RequestUsage

Get the usage of the response so far.

Returns

RequestUsage

model_request

async def model_request(
    model: models.Model | models.KnownModelName | str,
    messages: Sequence[messages.ModelMessage],
    model_settings: settings.ModelSettings | None = None,
    model_request_parameters: models.ModelRequestParameters | None = None,
    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
) -> messages.ModelResponse

Make a non-streamed request to a model.

model_request_example.py
from pydantic_ai import ModelRequest
from pydantic_ai.direct import model_request


async def main():
  model_response = await model_request(
      'anthropic:claude-haiku-4-5',
      [ModelRequest.user_text_prompt('What is the capital of France?')]  # (1)
  )
  print(model_response)
  '''
  ModelResponse(
      parts=[TextPart(content='The capital of France is Paris.')],
      usage=RequestUsage(input_tokens=56, output_tokens=7),
      model_name='claude-haiku-4-5',
      timestamp=datetime.datetime(...),
  )
  '''

Returns

messages.ModelResponse — The model response and token usage associated with the request.

Parameters

model : models.Model | models.KnownModelName | str

The model to make a request to. We allow str here since the actual list of allowed models changes frequently.

messages : Sequence[messages.ModelMessage]

Messages to send to the model.

model_settings : settings.ModelSettings | None Default: None

optional model settings

model_request_parameters : models.ModelRequestParameters | None Default: None

optional model request parameters

instrument : instrumented_models.InstrumentationSettings | bool | None Default: None

Whether to instrument the request with OpenTelemetry/Logfire, if None the value from logfire.instrument_pydantic_ai is used.

model_request_sync

def model_request_sync(
    model: models.Model | models.KnownModelName | str,
    messages: Sequence[messages.ModelMessage],
    model_settings: settings.ModelSettings | None = None,
    model_request_parameters: models.ModelRequestParameters | None = None,
    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
) -> messages.ModelResponse

Make a synchronous, non-streamed request to a model.

This is a convenience method that wraps model_request with loop.run_until_complete(...). You therefore can’t use this method inside async code or if there’s an active event loop.

model_request_sync_example.py
from pydantic_ai import ModelRequest
from pydantic_ai.direct import model_request_sync

model_response = model_request_sync(
  'anthropic:claude-haiku-4-5',
  [ModelRequest.user_text_prompt('What is the capital of France?')]  # (1)
)
print(model_response)
'''
ModelResponse(
  parts=[TextPart(content='The capital of France is Paris.')],
  usage=RequestUsage(input_tokens=56, output_tokens=7),
  model_name='claude-haiku-4-5',
  timestamp=datetime.datetime(...),
)
'''

Returns

messages.ModelResponse — The model response and token usage associated with the request.

Parameters

model : models.Model | models.KnownModelName | str

The model to make a request to. We allow str here since the actual list of allowed models changes frequently.

messages : Sequence[messages.ModelMessage]

Messages to send to the model.

model_settings : settings.ModelSettings | None Default: None

optional model settings

model_request_parameters : models.ModelRequestParameters | None Default: None

optional model request parameters

instrument : instrumented_models.InstrumentationSettings | bool | None Default: None

Whether to instrument the request with OpenTelemetry/Logfire, if None the value from logfire.instrument_pydantic_ai is used.

model_request_stream

def model_request_stream(
    model: models.Model | models.KnownModelName | str,
    messages: Sequence[messages.ModelMessage],
    model_settings: settings.ModelSettings | None = None,
    model_request_parameters: models.ModelRequestParameters | None = None,
    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
) -> AbstractAsyncContextManager[models.StreamedResponse]

Make a streamed async request to a model.

model_request_stream_example.py
from pydantic_ai import ModelRequest
from pydantic_ai.direct import model_request_stream


async def main():
  messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]  # (1)
  async with model_request_stream('openai:gpt-5-mini', messages) as stream:
      chunks = []
      async for chunk in stream:
          chunks.append(chunk)
      print(chunks)
      '''
      [
          PartStartEvent(index=0, part=TextPart(content='Albert Einstein was ')),
          FinalResultEvent(tool_name=None, tool_call_id=None),
          PartDeltaEvent(
              index=0, delta=TextPartDelta(content_delta='a German-born theoretical ')
          ),
          PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='physicist.')),
          PartEndEvent(
              index=0,
              part=TextPart(
                  content='Albert Einstein was a German-born theoretical physicist.'
              ),
          ),
      ]
      '''

Returns

AbstractAsyncContextManager[models.StreamedResponse] — A stream response async context manager.

Parameters

model : models.Model | models.KnownModelName | str

The model to make a request to. We allow str here since the actual list of allowed models changes frequently.

messages : Sequence[messages.ModelMessage]

Messages to send to the model.

model_settings : settings.ModelSettings | None Default: None

optional model settings

model_request_parameters : models.ModelRequestParameters | None Default: None

optional model request parameters

instrument : instrumented_models.InstrumentationSettings | bool | None Default: None

Whether to instrument the request with OpenTelemetry/Logfire, if None the value from logfire.instrument_pydantic_ai is used.

model_request_stream_sync

def model_request_stream_sync(
    model: models.Model | models.KnownModelName | str,
    messages: Sequence[messages.ModelMessage],
    model_settings: settings.ModelSettings | None = None,
    model_request_parameters: models.ModelRequestParameters | None = None,
    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
) -> StreamedResponseSync

Make a streamed synchronous request to a model.

This is the synchronous version of model_request_stream. It uses threading to run the asynchronous stream in the background while providing a synchronous iterator interface.

model_request_stream_sync_example.py

from pydantic_ai import ModelRequest
from pydantic_ai.direct import model_request_stream_sync

messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]
with model_request_stream_sync('openai:gpt-5-mini', messages) as stream:
    chunks = []
    for chunk in stream:
        chunks.append(chunk)
    print(chunks)
    '''
    [
        PartStartEvent(index=0, part=TextPart(content='Albert Einstein was ')),
        FinalResultEvent(tool_name=None, tool_call_id=None),
        PartDeltaEvent(
            index=0, delta=TextPartDelta(content_delta='a German-born theoretical ')
        ),
        PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='physicist.')),
        PartEndEvent(
            index=0,
            part=TextPart(
                content='Albert Einstein was a German-born theoretical physicist.'
            ),
        ),
    ]
    '''

Returns

StreamedResponseSync — A sync stream response context manager.

Parameters

model : models.Model | models.KnownModelName | str

The model to make a request to. We allow str here since the actual list of allowed models changes frequently.

messages : Sequence[messages.ModelMessage]

Messages to send to the model.

model_settings : settings.ModelSettings | None Default: None

optional model settings

model_request_parameters : models.ModelRequestParameters | None Default: None

optional model request parameters

instrument : instrumented_models.InstrumentationSettings | bool | None Default: None

Whether to instrument the request with OpenTelemetry/Logfire, if None the value from logfire.instrument_pydantic_ai is used.