Chat SDK

The Chat SDK lets you hold conversations with Grok in a stateless fashion. Stateless here means that conversations are not stored on the server; every request to the API carries the entire conversation history. Use this SDK to build custom applications on top of Grok.

Getting started

To get started with the Chat SDK, create a new client and access its chat property:

import xai_sdk

client = xai_sdk.Client()
chat = client.chat

Turn-based conversations

To start a new conversation with Grok via the Chat SDK, create a Conversation with the create_conversation() method and then call Conversation.add_response() for each user turn.

simple_chat.py
"""A simple example demonstrating text completion."""

import asyncio
import sys

import xai_sdk


async def main():
    """Runs the example."""
    client = xai_sdk.Client()

    conversation = client.chat.create_conversation()

    print("Enter an empty message to quit.\n")

    while True:
        user_input = input("Human: ")
        print("")

        if not user_input:
            return

        token_stream, _ = conversation.add_response(user_input)
        print("Grok: ", end="")
        async for token in token_stream:
            print(token, end="")
            sys.stdout.flush()
        print("\n")


asyncio.run(main())

About streaming

The add_response function returns a tuple of the form token_stream, final_response, where token_stream is an async iterable and final_response is a future that evaluates to the final response once it has been fully generated. Consuming token_stream is entirely optional and only needed when you want to visualize the token-generation process in real time. Note, however, that final_response does not resolve until token_stream has been fully consumed. If you are only interested in the final response, use the auxiliary method add_response_no_stream instead, which drains the stream for you.
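For instance, to display tokens as they arrive and still keep the fully assembled response object, consume the stream and then await the future. A minimal sketch, reusing the client setup from above:

import asyncio

import xai_sdk


async def main():
    conversation = xai_sdk.Client().chat.create_conversation()
    token_stream, final_response = conversation.add_response("Hello, Grok!")

    # Consuming the stream is what drives the request; the future below
    # resolves once the stream has been exhausted.
    async for token in token_stream:
        print(token, end="")

    response = await final_response
    print(f"\nFinal message: {response.message}")


asyncio.run(main())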

simple_chat_no_stream.py
"""A simple example demonstrating text completion without using streams."""

import asyncio

import xai_sdk


async def main():
    """Runs the example."""
    client = xai_sdk.Client()

    conversation = client.chat.create_conversation()

    print("Enter an empty message to quit.\n")

    while True:
        user_input = input("Human: ")
        print("")

        if not user_input:
            return

        response = await conversation.add_response_no_stream(user_input)
        print(f"Grok: {response.message}\n")


asyncio.run(main())

API reference

xai_sdk.chat.AsyncChat

Provides a simple chat API that can be used for products.

Source code in xai_sdk/chat.py
class AsyncChat:
    """Provides a simple chat API that can be used for products."""

    def __init__(self, stub: stateless_chat_pb2_grpc.StatelessChatStub) -> None:
        """Initializes a new instance of the `AsyncChat` class.

        Args:
            stub: Stub used to communicate with the gRPC API.
        """
        self._stub = stub

    def create_conversation(
        self, fun_mode: bool = False, disable_search: bool = False, model_name: str = ""
    ) -> "Conversation":
        """Creates a new empty conversation.

        Args:
            fun_mode: Whether fun mode shall be enabled for this conversation.
            disable_search: If true, Grok will not search X for context. This means Grok won't be
                able to answer questions that require realtime information.
            model_name: Name of the model to use. If empty, the default model will be used.

        Returns:
            Newly created conversation.
        """
        return Conversation(self._stub, fun_mode, disable_search, model_name)

xai_sdk.chat.AsyncChat.create_conversation(fun_mode=False, disable_search=False, model_name='')

Creates a new empty conversation.

Parameters:

    fun_mode (bool, default False): Whether fun mode shall be enabled for this conversation.
    disable_search (bool, default False): If true, Grok will not search X for context. This means Grok won't be able to answer questions that require realtime information.
    model_name (str, default ''): Name of the model to use. If empty, the default model will be used.

Returns:

    Conversation: Newly created conversation.
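A brief usage sketch (whether a particular model_name is accepted depends on your deployment; leaving it empty selects the default):

client = xai_sdk.Client()

# Fun mode enabled and X search disabled; the default model is used
# because model_name is left empty.
conversation = client.chat.create_conversation(fun_mode=True, disable_search=True)
assert conversation.fun_mode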


xai_sdk.chat.Conversation

A conversation held via the stateless Chat API.

Source code in xai_sdk/chat.py
class Conversation:
    """A conversation held via the stateless Chat API."""

    def __init__(
        self,
        stub: stateless_chat_pb2_grpc.StatelessChatStub,
        fun_mode: bool,
        disable_search: bool,
        model_name: str,
    ):
        """Initializes a new instance of the `Conversation` class.

        Args:
            stub: Stub used to communicate with the gRPC API.
            fun_mode: If true, Grok will respond in fun mode.
            disable_search: If true, Grok will not search X for context. This means Grok won't be
                able to answer questions that require realtime information.
            model_name: Name of the model to use. If empty, the default model will be used.
        """
        self._stub = stub
        self._conversation_id = uuid.uuid4().hex

        self._conversation = stateless_chat_pb2.StatelessConversation(
            stateless_conversation_id=self._conversation_id,
            responses=[],
            system_prompt_name="fun" if fun_mode else "",
            disable_search=disable_search,
            model_name=model_name,
            include_x_posts=True,
            x_posts_as_field=True,
        )

    @property
    def history(self) -> Sequence[stateless_chat_pb2.StatelessResponse]:
        """Returns the linear conversation history."""
        return self._conversation.responses

    @property
    def fun_mode(self) -> bool:
        """Returns true if the conversation happens in fun mode."""
        return self._conversation.system_prompt_name == "fun"

    async def add_response_no_stream(
        self, user_message: str, *, image_inputs: Sequence[str] = ()
    ) -> stateless_chat_pb2.StatelessResponse:
        """Same as `add_response` but doesn't return a token stream.

        Use this function if you are only interested in the complete response and don't need to
        stream the individual tokens.

        Args:
            user_message: Message the user has entered.
            image_inputs: A list of base64-encoded images that are attached to the response.

        Returns:
            The newly generated response.
        """
        stream, response = self.add_response(user_message, image_inputs=image_inputs)

        # We have to iterate over the stream to generate the final response.
        async for _ in stream:
            pass

        response = await response
        return response

    def add_response(
        self, user_message: str, *, image_inputs: Sequence[str] = ()
    ) -> tuple[AsyncGenerator[str, None], asyncio.Future[stateless_chat_pb2.StatelessResponse]]:
        """Adds a new user response to the conversation and samples a model response in return.

        Args:
            user_message: Message the user has entered.
            image_inputs: A list of base64-encoded images that are attached to the response.

        Returns:
            A tuple of the form `token_stream, response` where `token_stream` is an async iterable
                that emits the individual string tokens of the newly sampled response and `response`
                is a future that resolves to the Response object created.
        """
        self._conversation.responses.append(
            stateless_chat_pb2.StatelessResponse(
                sender=stateless_chat_pb2.StatelessResponse.Sender.HUMAN,
                message=user_message,
                image_inputs=image_inputs,
            )
        )

        response_future: asyncio.Future[stateless_chat_pb2.StatelessResponse] = asyncio.Future()

        async def _unroll_tokens():
            """Unrolls the token stream."""
            try:
                response = stateless_chat_pb2.StatelessResponse(
                    sender=stateless_chat_pb2.StatelessResponse.Sender.ASSISTANT,
                    message="",
                    query="",
                )

                async for update in self._stub.AddResponse(self._conversation):
                    if update.message:
                        response.message += update.message
                        yield update.message

                    if update.query:
                        response.query += update.query

                    if update.debug_log:
                        response.debug_log.MergeFrom(update.debug_log)

                    if len(update.web_search_results.results) > 0:
                        response.web_search_results.CopyFrom(update.web_search_results)

                    if update.search_context:
                        response.search_context.MergeFrom(update.search_context)

                self._conversation.responses.append(response)
                response_future.set_result(response)
            except Exception as e:
                response_future.set_exception(e)

        return _unroll_tokens(), response_future

xai_sdk.chat.Conversation.fun_mode: bool property

Returns true if the conversation happens in fun mode.

xai_sdk.chat.Conversation.history: Sequence[stateless_chat_pb2.StatelessResponse] property

Returns the linear conversation history.
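Because the API is stateless, this history lives entirely on the client and is resent with every request. A minimal sketch of inspecting it after a single turn:

import asyncio

import xai_sdk


async def main():
    conversation = xai_sdk.Client().chat.create_conversation()
    await conversation.add_response_no_stream("What is the capital of France?")

    # Each turn appends both the HUMAN message and the ASSISTANT reply,
    # so a single exchange yields two history entries.
    for entry in conversation.history:
        print(entry.sender, entry.message)


asyncio.run(main())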

xai_sdk.chat.Conversation.add_response(user_message, *, image_inputs=())

Adds a new user response to the conversation and samples a model response in return.

Parameters:

    user_message (str, required): Message the user has entered.
    image_inputs (Sequence[str], default ()): A list of base64-encoded images that are attached to the response.

Returns:

    tuple[AsyncGenerator[str, None], Future[StatelessResponse]]: A tuple of the form token_stream, response, where token_stream is an async iterable that emits the individual string tokens of the newly sampled response and response is a future that resolves to the Response object created.
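For example, images can be attached by base64-encoding their raw bytes. A hedged sketch, to be run inside an async function with an existing conversation; photo.png is a placeholder path, and plain base64 of the file contents is an assumption based on the docstring:

import base64

# Placeholder path; any image file works in this sketch.
with open("photo.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("ascii")

token_stream, response = conversation.add_response(
    "What is shown in this image?", image_inputs=[image_b64]
)
async for token in token_stream:
    print(token, end="")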


xai_sdk.chat.Conversation.add_response_no_stream(user_message, *, image_inputs=()) async

Same as add_response but doesn't return a token stream.

Use this function if you are only interested in the complete response and don't need to stream the individual tokens.

Parameters:

    user_message (str, required): Message the user has entered.
    image_inputs (Sequence[str], default ()): A list of base64-encoded images that are attached to the response.

Returns:

    StatelessResponse: The newly generated response.
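Because add_response_no_stream awaits the response future internally, any error raised while sampling (for example, a failed gRPC call surfaced via the future) propagates to the caller. A minimal error-handling sketch, inside an async function:

try:
    response = await conversation.add_response_no_stream("Hello!")
    print(response.message)
except Exception as e:
    # The future carries any exception raised during streaming.
    print(f"Sampling failed: {e}")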

Source code in xai_sdk/chat.py
async def add_response_no_stream(
    self, user_message: str, *, image_inputs: Sequence[str] = ()
) -> stateless_chat_pb2.StatelessResponse:
    """Same as `add_response` but doesn't return a token stream.

    Use this function if you are only interested in the complete response and don't need to
    stream the individual tokens.

    Args:
        user_message: Message the user has entered.
        image_inputs: A list of base64-encoded images that are attached to the response.

    Returns:
        The newly generated response.
    """
    stream, response = self.add_response(user_message, image_inputs=image_inputs)

    # We have to iterate over the stream to generate the final response.
    async for _ in stream:
        pass

    response = await response
    return response