A proxy that verifies App Attest/FxA payloads and forwards requests through LiteLLM.
Authorize first using App Attest or FxA. Pass the `authorization` header containing either the FxA token or the App Attest data JWT.
Header parameters:

| Header | Type | Description |
| --- | --- | --- |
| authorization (required) | string | FxA token or App Attest data JWT |
| service-type (required) | string | One of `"ai"`, `"s2s"`, `"memories"` |
| Use-App-Attest | boolean or null | |
| Use-Qa-Certificates | boolean or null | |

Request body schema:

| Field | Type | Default |
| --- | --- | --- |
| stream | boolean or null | `false` |
| messages | array of objects | `[]` |
| model | string or null | `"openai/gpt-4o"` |
| temperature | number or null | `0.1` |
| max_completion_tokens | integer or null | `1024` |
| top_p | number or null | `0.01` |
| mock_response | string or null | |
{- "stream": false,
- "messages": [ ],
- "model": "openai/gpt-4o",
- "temperature": 0.1,
- "max_completion_tokens": 1024,
- "top_p": 0.01,
- "mock_response": "string"
Mock LiteLLM endpoint with simulated latency.
Header parameters:

| Header | Type | Description |
| --- | --- | --- |
| authorization (required) | string | FxA token or App Attest data JWT |
| service-type (required) | string | One of `"ai"`, `"s2s"`, `"memories"` |
| Use-App-Attest | boolean or null | |
| Use-Qa-Certificates | boolean or null | |

Request body schema:

| Field | Type | Default |
| --- | --- | --- |
| stream | boolean or null | `false` |
| messages | array of objects | `[]` |
| model | string or null | `"openai/gpt-4o"` |
| temperature | number or null | `0.1` |
| max_completion_tokens | integer or null | `1024` |
| top_p | number or null | `0.01` |
| mock_response | string or null | |
{- "stream": false,
- "messages": [ ],
- "model": "openai/gpt-4o",
- "temperature": 0.1,
- "max_completion_tokens": 1024,
- "top_p": 0.01,
- "mock_response": "string"
Mock LiteLLM endpoint with simulated latency and JWT-only token validation (no POST calls).
Header parameters:

| Header | Type | Description |
| --- | --- | --- |
| Authorization (required) | string or null | JWT, validated without outbound POST calls |

Request body schema:

| Field | Type | Default |
| --- | --- | --- |
| stream | boolean or null | `false` |
| messages | array of objects | `[]` |
| model | string or null | `"openai/gpt-4o"` |
| temperature | number or null | `0.1` |
| max_completion_tokens | integer or null | `1024` |
| top_p | number or null | `0.01` |
| mock_response | string or null | |
{- "stream": false,
- "messages": [ ],
- "model": "openai/gpt-4o",
- "temperature": 0.1,
- "max_completion_tokens": 1024,
- "top_p": 0.01,
- "mock_response": "string"
}null
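A sketch for the JWT-only variant, which takes just the `Authorization` value and lists no `service-type` header; the route name is a placeholder, and `mock_response` is taken from the schema above.

```python
import requests

BASE_URL = "https://proxy.example.com"  # placeholder

# JWT-only variant: the token is validated locally (no outbound POST calls),
# so only Authorization is required; no service-type header is listed.
response = requests.post(
    f"{BASE_URL}/v1/mock/jwt",  # placeholder route
    headers={"Authorization": "Bearer <app-attest-jwt>"},
    json={
        "messages": [],
        "mock_response": "pong",  # string the mock can return, per the schema
    },
    timeout=30,
)
response.raise_for_status()
print(response.json())
```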