Merged
Changes from all commits
6 changes: 3 additions & 3 deletions examples/llm_multi_turn_conversation.py
@@ -31,7 +31,7 @@ async def chat_turn(
history: list,
user_input: str,
model: og.TEE_LLM = og.TEE_LLM.GEMINI_2_5_FLASH,
-) -> tuple[str, list, str]:
+) -> tuple[str, list, str | None]:
"""
Execute a single conversation turn.

@@ -42,7 +42,7 @@
model: TEE_LLM model to use.

Returns:
-Tuple of (assistant_reply, updated_history, transaction_hash).
+Tuple of (assistant_reply, updated_history, data_settlement_transaction_hash).
"""
history = add_user_message(history, user_input)

@@ -58,7 +58,7 @@
reply = str(result.chat_output["content"])
history = add_assistant_message(history, reply)

-return reply, history, result.transaction_hash
+return reply, history, result.data_settlement_transaction_hash


async def main():
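Since the settlement hash in the returned tuple can now be None, callers of this example should guard on it. A minimal consumer sketch (only chat_turn comes from this example; the `client` object and its construction are assumed):

```python
async def demo(client) -> None:
    history: list = []
    # chat_turn is the helper changed above; `client` is assumed to be an
    # initialized OpenGradient LLM client (its construction is elided here).
    reply, history, settlement_tx = await chat_turn(client, history, "Hello!")
    print(reply)
    # The third element is None when the provider returns no settlement metadata.
    if settlement_tx is not None:
        print(f"Data settlement tx: {settlement_tx}")
```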
2 changes: 1 addition & 1 deletion examples/llm_tool_calling.py
@@ -68,7 +68,7 @@ async def main():

print(f"Finish reason: {result.finish_reason}")
print(f"Chat output: {result.chat_output}")
print(f"Transaction hash: {result.transaction_hash}")
print(f"Data settlement transaction hash: {result.data_settlement_transaction_hash}")


asyncio.run(main())
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -27,8 +27,8 @@ dependencies = [
"langchain>=0.3.7",
"openai>=1.58.1",
"pydantic>=2.9.2",
"og-x402>=0.0.1.dev8",
"og-x402[extensions]>=0.0.1.dev8",
"og-x402>=0.0.2.dev1",
"og-x402[extensions]>=0.0.2.dev1",
adambalogh marked this conversation as resolved.
]

[project.optional-dependencies]
13 changes: 11 additions & 2 deletions src/opengradient/cli.py
@@ -415,7 +415,11 @@ def completion(
)

print_llm_completion_result(
-model_cid, completion_output.transaction_hash, completion_output.completion_output, is_vanilla=False, result=completion_output
+model_cid,
+completion_output.data_settlement_transaction_hash,
+completion_output.completion_output,
+is_vanilla=False,
+result=completion_output,
)
Comment on lines 417 to 423
Copilot AI (Apr 27, 2026):
The CLI is now passing data_settlement_transaction_hash into print_llm_completion_result, but the printer still only shows a transaction hash for is_vanilla flows and otherwise prints just Source: OpenGradient TEE. If the goal is to surface individual settlement metadata in the generated CLI output, update the print path for TEE results to display the settlement tx hash (and blob id when available) as well.
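One possible shape for that printer update, sketched with an illustrative helper name rather than the existing print_llm_completion_result internals:

```python
import click

def _print_settlement_metadata(result) -> None:
    # Echo settlement metadata for TEE results when the headers were present.
    tx_hash = getattr(result, "data_settlement_transaction_hash", None)
    blob_id = getattr(result, "data_settlement_blob_id", None)
    if tx_hash:
        click.echo(f"Data settlement tx hash: {tx_hash}")
    if blob_id:
        click.echo(f"Data settlement blob id: {blob_id}")
```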


except Exception as e:
@@ -603,7 +607,12 @@ def chat(
print_streaming_chat_result(model_cid, result, is_tee=True)
else:
print_llm_chat_result(
-model_cid, result.transaction_hash, result.finish_reason, result.chat_output, is_vanilla=False, result=result
+model_cid,
+result.data_settlement_transaction_hash,
+result.finish_reason,
+result.chat_output,
+is_vanilla=False,
+result=result,
)
Copilot AI (Apr 27, 2026):
Same as completion: the non-streaming chat CLI now forwards data_settlement_transaction_hash, but the output formatter for TEE mode does not print it. To match the PR intent, the CLI should display settlement tx hash / blob id (when present) for TEE chat results too.

Suggested change:
-)
+)
+settlement_tx_hash = getattr(result, "data_settlement_transaction_hash", None)
+settlement_blob_id = getattr(result, "data_settlement_blob_id", None)
+if settlement_tx_hash:
+    click.echo(f"Settlement tx hash: {settlement_tx_hash}")
+if settlement_blob_id:
+    click.echo(f"Settlement blob id: {settlement_blob_id}")


except Exception as e:
14 changes: 11 additions & 3 deletions src/opengradient/client/llm.py
@@ -28,6 +28,8 @@
DEFAULT_TEE_REGISTRY_ADDRESS = "0x4e72238852f3c918f4E4e57AeC9280dDB0c80248"

X402_PROCESSING_HASH_HEADER = "x-processing-hash"
+X402_DATA_SETTLEMENT_TX_HASH_HEADER = "x-settlement-tx-hash"
+X402_DATA_SETTLEMENT_BLOB_ID_HEADER = "x-settlement-walrus-blob-id"
X402_PLACEHOLDER_API_KEY = "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
BASE_MAINNET_NETWORK = "eip155:8453"
BASE_MAINNET_RPC = os.getenv("BASE_MAINNET_RPC", "https://base-rpc.publicnode.com")
@@ -285,7 +287,8 @@ async def _request() -> TextGenerationOutput:
raw_body = await response.aread()
result = json.loads(raw_body.decode())
return TextGenerationOutput(
transaction_hash="external",
data_settlement_transaction_hash=response.headers.get(X402_DATA_SETTLEMENT_TX_HASH_HEADER),
data_settlement_blob_id=response.headers.get(X402_DATA_SETTLEMENT_BLOB_ID_HEADER),
completion_output=result.get("completion"),
Comment on lines 289 to 292
Copilot AI (Apr 27, 2026):
New behavior: the completion response now populates data_settlement_transaction_hash and data_settlement_blob_id from HTTP response headers. There are existing unit tests for LLM.completion, but none assert the header-to-output mapping when these headers are present—adding a test that sets these headers on the fake response would prevent regressions.
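A sketch of such a test, assuming the fake HTTP fixture exposes a response whose headers mapping can be pre-populated (the fixture, client, and call names here are illustrative, not the actual test API):

```python
import pytest

@pytest.mark.asyncio
async def test_completion_maps_settlement_headers(fake_http, llm_client):
    # Hypothetical fixture API: set the settlement headers on the fake response.
    fake_http.response.headers["x-settlement-tx-hash"] = "0xabc123"
    fake_http.response.headers["x-settlement-walrus-blob-id"] = "blob-42"

    result = await llm_client.completion(model_cid="some-model", prompt="hi")

    assert result.data_settlement_transaction_hash == "0xabc123"
    assert result.data_settlement_blob_id == "blob-42"
```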

tee_signature=result.get("tee_signature"),
tee_timestamp=result.get("tee_timestamp"),
@@ -337,7 +340,7 @@ async def chat(

Returns:
Union[TextGenerationOutput, AsyncGenerator[StreamChunk, None]]:
-- If stream=False: TextGenerationOutput with chat_output, transaction_hash, finish_reason, and payment_hash
+- If stream=False: TextGenerationOutput with chat_output, data settlement metadata, finish_reason, and payment_hash
- If stream=True: Async generator yielding StreamChunk objects

Raises:
@@ -408,7 +411,8 @@ async def _request() -> TextGenerationOutput:
).strip()

return TextGenerationOutput(
transaction_hash="external",
data_settlement_transaction_hash=response.headers.get(X402_DATA_SETTLEMENT_TX_HASH_HEADER),
data_settlement_blob_id=response.headers.get(X402_DATA_SETTLEMENT_BLOB_ID_HEADER),
finish_reason=choices[0].get("finish_reason"),
chat_output=message,
usage=result.get("usage"),
@@ -447,6 +451,8 @@ async def _chat_tools_as_stream(self, params: _ChatParams, messages: List[Dict])
tee_id=result.tee_id,
tee_endpoint=result.tee_endpoint,
tee_payment_address=result.tee_payment_address,
+data_settlement_transaction_hash=result.data_settlement_transaction_hash,
+data_settlement_blob_id=result.data_settlement_blob_id,
)

async def _chat_stream(self, params: _ChatParams, messages: List[Dict]) -> AsyncGenerator[StreamChunk, None]:
@@ -535,6 +541,8 @@ async def _parse_sse_response(self, response, tee) -> AsyncGenerator[StreamChunk

chunk = StreamChunk.from_sse_data(data)
if chunk.is_final:
+chunk.data_settlement_transaction_hash = response.headers.get(X402_DATA_SETTLEMENT_TX_HASH_HEADER)
+chunk.data_settlement_blob_id = response.headers.get(X402_DATA_SETTLEMENT_BLOB_ID_HEADER)
Comment on lines 543 to +545
Copilot AI (Apr 27, 2026):
New behavior: the final streaming StreamChunk is mutated to include data_settlement_transaction_hash / data_settlement_blob_id from the response headers. The streaming tests currently validate TEE metadata on the final chunk but don't validate these new settlement fields—please add coverage to ensure they’re set correctly when the headers are provided.
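A matching sketch for the streaming path, with the same caveat that the fixture wiring and the chat call shape are illustrative:

```python
import pytest

@pytest.mark.asyncio
async def test_final_chunk_carries_settlement_metadata(fake_http, llm_client):
    # Hypothetical fixture API: headers set before the SSE stream is consumed.
    fake_http.response.headers["x-settlement-tx-hash"] = "0xdef456"
    fake_http.response.headers["x-settlement-walrus-blob-id"] = "blob-99"

    stream = await llm_client.chat(model_cid="some-model", messages=[], stream=True)
    chunks = [chunk async for chunk in stream]
    final = chunks[-1]

    assert final.is_final
    assert final.data_settlement_transaction_hash == "0xdef456"
    assert final.data_settlement_blob_id == "blob-99"
```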

chunk.tee_id = tee.tee_id
chunk.tee_endpoint = tee.endpoint
chunk.tee_payment_address = tee.payment_address
1 change: 0 additions & 1 deletion src/opengradient/client/twins.py
@@ -79,7 +79,6 @@ def chat(
raise RuntimeError(f"Invalid response: 'choices' missing or empty in {result}")

return TextGenerationOutput(
transaction_hash="",
finish_reason=choices[0].get("finish_reason"),
chat_output=choices[0].get("message"),
payment_hash=None,
29 changes: 19 additions & 10 deletions src/opengradient/types.py
@@ -237,6 +237,10 @@ class StreamChunk:
tee_id: On-chain TEE registry ID of the enclave that served this request (final chunk only)
tee_endpoint: Endpoint URL of the TEE that served this request (final chunk only)
tee_payment_address: Payment address registered for the TEE (final chunk only)
+data_settlement_transaction_hash: Transaction hash for the data settlement
+transaction, present on the final chunk when available.
+data_settlement_blob_id: Walrus blob ID for individual data settlement,
+present on the final chunk when available.
"""

choices: List[StreamChoice]
@@ -248,6 +252,8 @@
tee_id: Optional[str] = None
tee_endpoint: Optional[str] = None
tee_payment_address: Optional[str] = None
+data_settlement_transaction_hash: Optional[str] = None
+data_settlement_blob_id: Optional[str] = None

@classmethod
def from_sse_data(cls, data: Dict) -> "StreamChunk":
@@ -400,8 +406,12 @@ class TextGenerationOutput:
performed inside a TEE enclave.

Attributes:
-transaction_hash: Blockchain transaction hash. Set to
-``"external"`` for TEE-routed providers.
+data_settlement_transaction_hash: Blockchain transaction hash for
+the data settlement transaction. ``None`` when the provider
+does not return data settlement metadata.
+data_settlement_blob_id: Walrus blob ID for individual data
+settlement. ``None`` for private/batch settlement or when the
+provider does not return it.
finish_reason: Reason the model stopped generating
(e.g. ``"stop"``, ``"tool_call"``, ``"error"``).
Only populated for chat requests.
@@ -416,8 +426,11 @@
time.
"""

-transaction_hash: str
-"""Blockchain transaction hash. Set to ``"external"`` for TEE-routed providers."""
+data_settlement_transaction_hash: Optional[str] = None
+"""Blockchain transaction hash for the data settlement transaction. ``None`` when unavailable."""
+
+data_settlement_blob_id: Optional[str] = None
+"""Walrus blob ID for individual data settlement. ``None`` when unavailable."""

finish_reason: Optional[str] = None
"""Reason the model stopped generating (e.g. ``"stop"``, ``"tool_call"``, ``"error"``). Only populated for chat requests."""
@@ -580,13 +593,9 @@ class ResponseFormat:
def __post_init__(self) -> None:
valid_types = ("text", "json_object", "json_schema")
if self.type not in valid_types:
-raise ValueError(
-f"ResponseFormat.type must be one of {valid_types}, got '{self.type}'"
-)
+raise ValueError(f"ResponseFormat.type must be one of {valid_types}, got '{self.type}'")
if self.type == "json_schema" and not self.json_schema:
-raise ValueError(
-"ResponseFormat.json_schema is required when type='json_schema'"
-)
+raise ValueError("ResponseFormat.json_schema is required when type='json_schema'")

def to_dict(self) -> Dict:
"""Serialise to a JSON-compatible dict for the TEE gateway request payload."""
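To illustrate how downstream code sees the new optional fields (a sketch; the import path assumes the package layout above and the values are placeholders):

```python
from opengradient.types import TextGenerationOutput

output = TextGenerationOutput(
    data_settlement_transaction_hash="0xabc123",  # None when the provider omits it
    data_settlement_blob_id=None,  # None for private/batch settlement
    finish_reason="stop",
    chat_output={"role": "assistant", "content": "hi"},
)

# Both fields are optional, so consumers should guard on None.
if output.data_settlement_transaction_hash:
    print(output.data_settlement_transaction_hash)
```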
18 changes: 9 additions & 9 deletions tests/langchain_adapter_test.py
@@ -78,7 +78,7 @@ def test_identifying_params(self, model):
class TestGenerate:
def test_text_response(self, model, mock_llm_client):
mock_llm_client.chat.return_value = TextGenerationOutput(
transaction_hash="external",
data_settlement_transaction_hash="external",
finish_reason="stop",
chat_output={"role": "assistant", "content": "Hello there!"},
)
@@ -91,7 +91,7 @@

async def test_async_text_response(self, model, mock_llm_client):
mock_llm_client.chat.return_value = TextGenerationOutput(
transaction_hash="external",
data_settlement_transaction_hash="external",
finish_reason="stop",
chat_output={"role": "assistant", "content": "Hello async!"},
)
@@ -103,7 +103,7 @@

def test_tool_call_response_flat_format(self, model, mock_llm_client):
mock_llm_client.chat.return_value = TextGenerationOutput(
transaction_hash="external",
data_settlement_transaction_hash="external",
finish_reason="tool_call",
chat_output={
"role": "assistant",
@@ -129,7 +129,7 @@ def test_tool_call_response_flat_format(self, model, mock_llm_client):

def test_tool_call_response_nested_format(self, model, mock_llm_client):
mock_llm_client.chat.return_value = TextGenerationOutput(
transaction_hash="external",
data_settlement_transaction_hash="external",
finish_reason="tool_call",
chat_output={
"role": "assistant",
@@ -158,7 +158,7 @@ def test_tool_call_response_nested_format(self, model, mock_llm_client):

def test_content_as_list_of_blocks(self, model, mock_llm_client):
mock_llm_client.chat.return_value = TextGenerationOutput(
transaction_hash="external",
data_settlement_transaction_hash="external",
finish_reason="stop",
chat_output={
"role": "assistant",
@@ -172,7 +172,7 @@ def test_content_as_list_of_blocks(self, model, mock_llm_client):

def test_empty_chat_output(self, model, mock_llm_client):
mock_llm_client.chat.return_value = TextGenerationOutput(
transaction_hash="external",
data_settlement_transaction_hash="external",
finish_reason="stop",
chat_output=None,
)
@@ -185,7 +185,7 @@ def test_empty_chat_output(self, model, mock_llm_client):
class TestMessageConversion:
def test_converts_all_message_types(self, model, mock_llm_client):
mock_llm_client.chat.return_value = TextGenerationOutput(
transaction_hash="external",
data_settlement_transaction_hash="external",
finish_reason="stop",
chat_output={"role": "assistant", "content": "ok"},
)
@@ -224,7 +224,7 @@ def test_unsupported_message_type_raises(self, model):

def test_passes_correct_params_to_client(self, model, mock_llm_client):
mock_llm_client.chat.return_value = TextGenerationOutput(
transaction_hash="external",
data_settlement_transaction_hash="external",
finish_reason="stop",
chat_output={"role": "assistant", "content": "ok"},
)
@@ -299,7 +299,7 @@ def test_bind_tool_choice(self, model):

def test_tools_used_in_generate(self, model, mock_llm_client):
mock_llm_client.chat.return_value = TextGenerationOutput(
transaction_hash="external",
data_settlement_transaction_hash="external",
finish_reason="stop",
chat_output={"role": "assistant", "content": "ok"},
)
3 changes: 2 additions & 1 deletion tests/llm_test.py
@@ -95,6 +95,7 @@ class _FakeStreamResponse:
def __init__(self, status_code: int, chunks: List[bytes]):
self.status_code = status_code
self._chunks = chunks
+self.headers: Dict[str, str] = {}

async def aiter_raw(self):
for chunk in self._chunks:
@@ -169,7 +170,7 @@ async def test_returns_completion_output(self, fake_http):
assert result.completion_output == "Hello world"
assert result.tee_signature == "sig-abc"
assert result.tee_timestamp == "2025-01-01T00:00:00Z"
assert result.transaction_hash == "external"
assert result.data_settlement_transaction_hash is None
assert result.tee_id == "test-tee-id"
assert result.tee_payment_address == "0xTestPayment"

10 changes: 5 additions & 5 deletions uv.lock

Some generated files are not rendered by default.