> For clean Markdown of any page, append .md to the page URL.
> For a complete documentation index, see https://docs.meetstream.ai/api-reference/ap-is/mia/llms.txt.
> For full documentation content, see https://docs.meetstream.ai/api-reference/ap-is/mia/llms-full.txt.

# Create Agent Config

POST https://api.meetstream.ai/api/v1/mia
Content-Type: application/json

For more information on customizing the payload, please refer to [MIA Configurations](https://docs.meetstream.ai/guides/mia-meetstream-infrastructure-agents/mia-configurations) in our guides.

Reference: https://docs.meetstream.ai/api-reference/ap-is/mia/create-agent-config

## OpenAPI Specification

```yaml
openapi: 3.1.0
info:
  title: Meetstream API
  version: 1.0.0
paths:
  /api/v1/mia:
    post:
      operationId: create-agent-config
      summary: Create Agent Config
      description: >-
        For more information on customizing the payload, please refer to [MIA
        Configurations](https://docs.meetstream.ai/guides/mia-meetstream-infrastructure-agents/mia-configurations)
        in our guides.
      tags:
        - subpackage_mia
      parameters:
        - name: Authorization
          in: header
          description: 'Format: Token <your_api_key>'
          required: true
          schema:
            type: string
      responses:
        '201':
          description: Created
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/MIACreateConfigResponse'
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/MIACreateConfigRequest'
servers:
  - url: https://api.meetstream.ai
components:
  schemas:
    MiaCreateConfigRequestMode:
      type: string
      enum:
        - pipeline
        - realtime
      title: MiaCreateConfigRequestMode
    MiaModelConfigThinkingConfig:
      type: object
      properties:
        include_thoughts:
          type: boolean
        thinking_budget:
          type: integer
      title: MiaModelConfigThinkingConfig
    MIAModelConfig:
      type: object
      properties:
        provider:
          type: string
        model:
          type: string
        system_prompt:
          type: string
        first_message:
          type: string
        max_token:
          type: integer
        temperature:
          type: number
          format: double
        voice:
          type: string
          description: Used for realtime mode models that have a built-in voice.
        modalities:
          type: array
          items:
            type: string
        max_response_output_tokens:
          type: integer
        max_tokens:
          type: integer
        top_p:
          type: number
          format: double
        frequency_penalty:
          type: number
          format: double
        presence_penalty:
          type: number
          format: double
        thinking_config:
          $ref: '#/components/schemas/MiaModelConfigThinkingConfig'
        enable_affective_dialog:
          type: boolean
        proactivity:
          type: boolean
        disable_automatic_activity_detection:
          type: boolean
      title: MIAModelConfig
    MIAVoiceConfig:
      type: object
      properties:
        provider:
          type: string
        model:
          type: string
        voice_id:
          type: string
        speed:
          type: number
          format: double
      title: MIAVoiceConfig
    MIATranscriberConfig:
      type: object
      properties:
        provider:
          type: string
        model:
          type: string
        language:
          type: string
        boostwords:
          type: array
          items:
            type: string
      title: MIATranscriberConfig
    MiaAgentConfigInterruptionMode:
      type: string
      enum:
        - adaptive
        - vad
      title: MiaAgentConfigInterruptionMode
    MIAInterruptionsConfig:
      type: object
      properties:
        min_duration_seconds:
          type: number
          format: double
        word_threshold:
          type: integer
      title: MIAInterruptionsConfig
    MIAAgentConfig:
      type: object
      properties:
        tools:
          type: array
          items:
            type: string
        preemptive_generation:
          type: boolean
        user_away_timeout:
          type: number
          format: double
        interruption_mode:
          $ref: '#/components/schemas/MiaAgentConfigInterruptionMode'
        interruptions:
          $ref: '#/components/schemas/MIAInterruptionsConfig'
        false_interruption_timeout:
          type: number
          format: double
        vad_eagerness:
          type: string
        vad_type:
          type: string
        enable_interruptions:
          type: boolean
        resume_false_interruption:
          type: boolean
        response_modality:
          type: string
        mcp_servers:
          type: object
          additionalProperties:
            description: Any type
        tools_enabled:
          type: boolean
        vad_threshold:
          type: number
          format: double
        vad_prefix_padding_ms:
          type: integer
        vad_silence_duration_ms:
          type: integer
        turn_detection:
          type: string
        vad_activation_threshold:
          type: number
          format: double
        vad_deactivation_threshold:
          type:
            - number
            - 'null'
          format: double
        vad_min_silence_duration_ms:
          type: integer
        vad_min_speech_duration_ms:
          type: integer
        vad_prefix_padding_duration_ms:
          type: integer
        endpointing_mode:
          type: string
        min_endpointing_delay:
          type: number
          format: double
        max_endpointing_delay:
          type: number
          format: double
      title: MIAAgentConfig
    MIAAudioConfig:
      type: object
      properties:
        sample_rate:
          type: integer
        num_channels:
          type: integer
      title: MIAAudioConfig
    MIAWakeWordConfig:
      type: object
      properties:
        enabled:
          type: boolean
        words:
          type: array
          items:
            type: string
        timeout:
          type: integer
      title: MIAWakeWordConfig
    MIAAvatarConfig:
      type: object
      properties:
        provider:
          type: string
        enabled:
          type: boolean
        avatar_id:
          type: string
      title: MIAAvatarConfig
    MIACreateConfigRequest:
      type: object
      properties:
        agent_name:
          type: string
        mode:
          $ref: '#/components/schemas/MiaCreateConfigRequestMode'
        model:
          $ref: '#/components/schemas/MIAModelConfig'
        voice:
          $ref: '#/components/schemas/MIAVoiceConfig'
        transcriber:
          $ref: '#/components/schemas/MIATranscriberConfig'
        agent:
          $ref: '#/components/schemas/MIAAgentConfig'
        audio:
          $ref: '#/components/schemas/MIAAudioConfig'
        wake_word:
          oneOf:
            - $ref: '#/components/schemas/MIAWakeWordConfig'
            - type: 'null'
        Avatar:
          $ref: '#/components/schemas/MIAAvatarConfig'
      required:
        - agent_name
        - mode
        - model
      title: MIACreateConfigRequest
    MiaAgentConfigObjectMode:
      type: string
      enum:
        - pipeline
        - realtime
      title: MiaAgentConfigObjectMode
    MIAAgentConfigObject:
      type: object
      properties:
        AgentConfigID:
          type: string
        UserID:
          type: string
        AgentName:
          type: string
        Mode:
          $ref: '#/components/schemas/MiaAgentConfigObjectMode'
        Model:
          $ref: '#/components/schemas/MIAModelConfig'
        Voice:
          oneOf:
            - $ref: '#/components/schemas/MIAVoiceConfig'
            - type: 'null'
        Transcriber:
          oneOf:
            - $ref: '#/components/schemas/MIATranscriberConfig'
            - type: 'null'
        Agent:
          $ref: '#/components/schemas/MIAAgentConfig'
        Audio:
          $ref: '#/components/schemas/MIAAudioConfig'
        Avatar:
          $ref: '#/components/schemas/MIAAvatarConfig'
        WakeWord:
          type: object
          additionalProperties:
            description: Any type
        CreatedAt:
          type: string
        UpdatedAt:
          type: string
      description: A full agent configuration object as returned by the API.
      title: MIAAgentConfigObject
    MIACreateConfigResponse:
      type: object
      properties:
        message:
          type: string
        agent_config_id:
          type: string
        agent_config:
          $ref: '#/components/schemas/MIAAgentConfigObject'
      title: MIACreateConfigResponse
  securitySchemes:
    TokenAuth:
      type: apiKey
      in: header
      name: Authorization
      description: 'Format: Token <your_api_key>'

```

## SDK Code Examples

```python Pipeline - OpenAI + elevenlabs + OpenAI
import requests

# POST /api/v1/mia — create a MIA agent configuration.
url = "https://api.meetstream.ai/api/v1/mia"

# "pipeline" mode example: separate model (OpenAI LLM), voice (ElevenLabs TTS)
# and transcriber (OpenAI STT) configs. Per the MIACreateConfigRequest schema,
# agent_name, mode and model are required; the rest are optional.
payload = {
    "agent_name": "MeetStream's agent",
    "mode": "pipeline",
    "model": {
        "provider": "openai",
        "model": "gpt-4o-mini",
        "system_prompt": "You are a helpful meeting assistant. Keep responses concise and friendly. Speak naturally and conversationally.",
        "first_message": "Hi! I'm your AI meeting assistant. How can I help you today?",
        "max_token": 2048,
        "temperature": 0.8
    },
    "voice": {
        "provider": "elevenlabs",
        "model": "tts-1",
        "voice_id": "alloy"
    },
    "transcriber": {
        "provider": "openai",
        "model": "whisper-1",
        "language": "en"
    },
    "agent": {
        "tools": ["current_time", "weather_now"],
        "preemptive_generation": True,
        "user_away_timeout": 15
    },
    "audio": {
        "sample_rate": 24000,
        "num_channels": 1
    }
}
# Authorization header format is `Token <your_api_key>`.
headers = {
    "Authorization": "<apiKey>",
    "Content-Type": "application/json"
}

response = requests.post(url, json=payload, headers=headers)

# On success (201) the body contains message, agent_config_id and agent_config.
print(response.json())
```

```javascript Pipeline - OpenAI + elevenlabs + OpenAI
// POST /api/v1/mia — create a "pipeline" mode agent config
// (OpenAI LLM + ElevenLabs TTS + OpenAI transcriber).
const url = 'https://api.meetstream.ai/api/v1/mia';
const options = {
  method: 'POST',
  // Authorization header format is `Token <your_api_key>`.
  headers: {Authorization: '<apiKey>', 'Content-Type': 'application/json'},
  body: '{"agent_name":"MeetStream\'s agent","mode":"pipeline","model":{"provider":"openai","model":"gpt-4o-mini","system_prompt":"You are a helpful meeting assistant. Keep responses concise and friendly. Speak naturally and conversationally.","first_message":"Hi! I\'m your AI meeting assistant. How can I help you today?","max_token":2048,"temperature":0.8},"voice":{"provider":"elevenlabs","model":"tts-1","voice_id":"alloy"},"transcriber":{"provider":"openai","model":"whisper-1","language":"en"},"agent":{"tools":["current_time","weather_now"],"preemptive_generation":true,"user_away_timeout":15},"audio":{"sample_rate":24000,"num_channels":1}}'
};

try {
  // On success (201) the body contains message, agent_config_id and agent_config.
  const response = await fetch(url, options);
  const data = await response.json();
  console.log(data);
} catch (error) {
  console.error(error);
}
```

```go Pipeline - OpenAI + elevenlabs + OpenAI
package main

import (
	"fmt"
	"strings"
	"net/http"
	"io"
)

func main() {

	// POST /api/v1/mia — create a MIA agent configuration.
	url := "https://api.meetstream.ai/api/v1/mia"

	// "pipeline" mode payload: separate model (OpenAI LLM), voice (ElevenLabs TTS)
	// and transcriber (OpenAI STT) configs; agent_name, mode and model are required.
	payload := strings.NewReader("{\n  \"agent_name\": \"MeetStream's agent\",\n  \"mode\": \"pipeline\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-4o-mini\",\n    \"system_prompt\": \"You are a helpful meeting assistant. Keep responses concise and friendly. Speak naturally and conversationally.\",\n    \"first_message\": \"Hi! I'm your AI meeting assistant. How can I help you today?\",\n    \"max_token\": 2048,\n    \"temperature\": 0.8\n  },\n  \"voice\": {\n    \"provider\": \"elevenlabs\",\n    \"model\": \"tts-1\",\n    \"voice_id\": \"alloy\"\n  },\n  \"transcriber\": {\n    \"provider\": \"openai\",\n    \"model\": \"whisper-1\",\n    \"language\": \"en\"\n  },\n  \"agent\": {\n    \"tools\": [\n      \"current_time\",\n      \"weather_now\"\n    ],\n    \"preemptive_generation\": true,\n    \"user_away_timeout\": 15\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}")

	req, _ := http.NewRequest("POST", url, payload)

	// Authorization header format is `Token <your_api_key>`.
	req.Header.Add("Authorization", "<apiKey>")
	req.Header.Add("Content-Type", "application/json")

	res, _ := http.DefaultClient.Do(req)

	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)

	// On success (201) the body contains message, agent_config_id and agent_config.
	fmt.Println(res)
	fmt.Println(string(body))

}
```

```ruby Pipeline - OpenAI + elevenlabs + OpenAI
require 'uri'
require 'net/http'

# POST /api/v1/mia — create a "pipeline" mode agent config
# (OpenAI LLM + ElevenLabs TTS + OpenAI transcriber).
url = URI("https://api.meetstream.ai/api/v1/mia")

http = Net::HTTP.new(url.host, url.port)
http.use_ssl = true

request = Net::HTTP::Post.new(url)
# Authorization header format is `Token <your_api_key>`.
request["Authorization"] = '<apiKey>'
request["Content-Type"] = 'application/json'
request.body = "{\n  \"agent_name\": \"MeetStream's agent\",\n  \"mode\": \"pipeline\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-4o-mini\",\n    \"system_prompt\": \"You are a helpful meeting assistant. Keep responses concise and friendly. Speak naturally and conversationally.\",\n    \"first_message\": \"Hi! I'm your AI meeting assistant. How can I help you today?\",\n    \"max_token\": 2048,\n    \"temperature\": 0.8\n  },\n  \"voice\": {\n    \"provider\": \"elevenlabs\",\n    \"model\": \"tts-1\",\n    \"voice_id\": \"alloy\"\n  },\n  \"transcriber\": {\n    \"provider\": \"openai\",\n    \"model\": \"whisper-1\",\n    \"language\": \"en\"\n  },\n  \"agent\": {\n    \"tools\": [\n      \"current_time\",\n      \"weather_now\"\n    ],\n    \"preemptive_generation\": true,\n    \"user_away_timeout\": 15\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}"

# On success (201) the body contains message, agent_config_id and agent_config.
response = http.request(request)
puts response.read_body
```

```java Pipeline - OpenAI + elevenlabs + OpenAI
import com.mashape.unirest.http.HttpResponse;
import com.mashape.unirest.http.Unirest;

// POST /api/v1/mia — create a "pipeline" mode agent config
// (OpenAI LLM + ElevenLabs TTS + OpenAI transcriber).
// Authorization header format is `Token <your_api_key>`; a 201 response
// body contains message, agent_config_id and agent_config.
HttpResponse<String> response = Unirest.post("https://api.meetstream.ai/api/v1/mia")
  .header("Authorization", "<apiKey>")
  .header("Content-Type", "application/json")
  .body("{\n  \"agent_name\": \"MeetStream's agent\",\n  \"mode\": \"pipeline\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-4o-mini\",\n    \"system_prompt\": \"You are a helpful meeting assistant. Keep responses concise and friendly. Speak naturally and conversationally.\",\n    \"first_message\": \"Hi! I'm your AI meeting assistant. How can I help you today?\",\n    \"max_token\": 2048,\n    \"temperature\": 0.8\n  },\n  \"voice\": {\n    \"provider\": \"elevenlabs\",\n    \"model\": \"tts-1\",\n    \"voice_id\": \"alloy\"\n  },\n  \"transcriber\": {\n    \"provider\": \"openai\",\n    \"model\": \"whisper-1\",\n    \"language\": \"en\"\n  },\n  \"agent\": {\n    \"tools\": [\n      \"current_time\",\n      \"weather_now\"\n    ],\n    \"preemptive_generation\": true,\n    \"user_away_timeout\": 15\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}")
  .asString();
```

```php Pipeline - OpenAI + elevenlabs + OpenAI
<?php
require_once('vendor/autoload.php');

// POST /api/v1/mia — create a "pipeline" mode agent config
// (OpenAI LLM + ElevenLabs TTS + OpenAI transcriber).
$client = new \GuzzleHttp\Client();

// agent_name, mode and model are required; Authorization header format
// is `Token <your_api_key>`.
$response = $client->request('POST', 'https://api.meetstream.ai/api/v1/mia', [
  'body' => '{
  "agent_name": "MeetStream\'s agent",
  "mode": "pipeline",
  "model": {
    "provider": "openai",
    "model": "gpt-4o-mini",
    "system_prompt": "You are a helpful meeting assistant. Keep responses concise and friendly. Speak naturally and conversationally.",
    "first_message": "Hi! I\'m your AI meeting assistant. How can I help you today?",
    "max_token": 2048,
    "temperature": 0.8
  },
  "voice": {
    "provider": "elevenlabs",
    "model": "tts-1",
    "voice_id": "alloy"
  },
  "transcriber": {
    "provider": "openai",
    "model": "whisper-1",
    "language": "en"
  },
  "agent": {
    "tools": [
      "current_time",
      "weather_now"
    ],
    "preemptive_generation": true,
    "user_away_timeout": 15
  },
  "audio": {
    "sample_rate": 24000,
    "num_channels": 1
  }
}',
  'headers' => [
    'Authorization' => '<apiKey>',
    'Content-Type' => 'application/json',
  ],
]);

// On success (201) the body contains message, agent_config_id and agent_config.
echo $response->getBody();
```

```csharp Pipeline - OpenAI + elevenlabs + OpenAI
using RestSharp;

// POST /api/v1/mia — create a "pipeline" mode agent config
// (OpenAI LLM + ElevenLabs TTS + OpenAI transcriber).
var client = new RestClient("https://api.meetstream.ai/api/v1/mia");
var request = new RestRequest(Method.POST);
// Authorization header format is `Token <your_api_key>`.
request.AddHeader("Authorization", "<apiKey>");
request.AddHeader("Content-Type", "application/json");
request.AddParameter("application/json", "{\n  \"agent_name\": \"MeetStream's agent\",\n  \"mode\": \"pipeline\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-4o-mini\",\n    \"system_prompt\": \"You are a helpful meeting assistant. Keep responses concise and friendly. Speak naturally and conversationally.\",\n    \"first_message\": \"Hi! I'm your AI meeting assistant. How can I help you today?\",\n    \"max_token\": 2048,\n    \"temperature\": 0.8\n  },\n  \"voice\": {\n    \"provider\": \"elevenlabs\",\n    \"model\": \"tts-1\",\n    \"voice_id\": \"alloy\"\n  },\n  \"transcriber\": {\n    \"provider\": \"openai\",\n    \"model\": \"whisper-1\",\n    \"language\": \"en\"\n  },\n  \"agent\": {\n    \"tools\": [\n      \"current_time\",\n      \"weather_now\"\n    ],\n    \"preemptive_generation\": true,\n    \"user_away_timeout\": 15\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}", ParameterType.RequestBody);
// On success (201) the body contains message, agent_config_id and agent_config.
IRestResponse response = client.Execute(request);
```

```swift Pipeline - OpenAI + elevenlabs + OpenAI
import Foundation

// POST /api/v1/mia — create a "pipeline" mode agent config
// (OpenAI LLM + ElevenLabs TTS + OpenAI transcriber).
// Authorization header format is `Token <your_api_key>`.
let headers = [
  "Authorization": "<apiKey>",
  "Content-Type": "application/json"
]

// Request payload (MIACreateConfigRequest): agent_name, mode and model
// are required; voice, transcriber, agent and audio are optional.
let parameters: [String: Any] = [
  "agent_name": "MeetStream's agent",
  "mode": "pipeline",
  "model": [
    "provider": "openai",
    "model": "gpt-4o-mini",
    "system_prompt": "You are a helpful meeting assistant. Keep responses concise and friendly. Speak naturally and conversationally.",
    "first_message": "Hi! I'm your AI meeting assistant. How can I help you today?",
    "max_token": 2048,
    "temperature": 0.8
  ],
  "voice": [
    "provider": "elevenlabs",
    "model": "tts-1",
    "voice_id": "alloy"
  ],
  "transcriber": [
    "provider": "openai",
    "model": "whisper-1",
    "language": "en"
  ],
  "agent": [
    "tools": ["current_time", "weather_now"],
    "preemptive_generation": true,
    "user_away_timeout": 15
  ],
  "audio": [
    "sample_rate": 24000,
    "num_channels": 1
  ]
]

// JSONSerialization.data(withJSONObject:options:) is a throwing API, so the
// call must be marked with `try` (the original snippet omitted it and did
// not compile). `try!` is acceptable here: `parameters` is a literal of
// JSON-encodable values, so a failure would be a programming error.
let postData = try! JSONSerialization.data(withJSONObject: parameters, options: [])

var request = URLRequest(
  url: URL(string: "https://api.meetstream.ai/api/v1/mia")!,
  cachePolicy: .useProtocolCachePolicy,
  timeoutInterval: 10.0
)
request.httpMethod = "POST"
request.allHTTPHeaderFields = headers
request.httpBody = postData

// On success the API responds 201 with message, agent_config_id and agent_config.
let dataTask = URLSession.shared.dataTask(with: request) { data, response, error in
  if let error = error {
    print(error)
  } else if let httpResponse = response as? HTTPURLResponse {
    print(httpResponse)
  }
}

dataTask.resume()
```

```python Realtime - OpenAI
import requests

# POST /api/v1/mia — create a MIA agent configuration.
url = "https://api.meetstream.ai/api/v1/mia"

# "realtime" mode example: a single OpenAI realtime model with a built-in
# voice ("coral"), so no separate voice/transcriber config is sent.
# VAD and interruption behavior is tuned under "agent".
payload = {
    "agent_name": "New MIA Agent",
    "mode": "realtime",
    "model": {
        "provider": "openai",
        "model": "gpt-realtime-mini",
        "system_prompt": "You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.",
        "first_message": "Hey there! I am your AI assistant, ready to help with your meeting.",
        "temperature": 0.8,
        "voice": "coral",
        "modalities": ["text", "audio"],
        "max_response_output_tokens": 200
    },
    "agent": {
        "tools": [],
        "preemptive_generation": False,
        "user_away_timeout": 15,
        "interruptions": {
            "min_duration_seconds": 0.5,
            "word_threshold": 0
        },
        "false_interruption_timeout": 2,
        "vad_type": "server_vad",
        "enable_interruptions": True,
        "resume_false_interruption": True,
        "tools_enabled": False,
        "vad_threshold": 0.5,
        "vad_prefix_padding_ms": 0,
        "vad_silence_duration_ms": 200
    },
    "audio": {
        "sample_rate": 24000,
        "num_channels": 1
    }
}
# Authorization header format is `Token <your_api_key>`.
headers = {
    "Authorization": "<apiKey>",
    "Content-Type": "application/json"
}

response = requests.post(url, json=payload, headers=headers)

# On success (201) the body contains message, agent_config_id and agent_config.
print(response.json())
```

```javascript Realtime - OpenAI
// POST /api/v1/mia — create a "realtime" mode agent config using a single
// OpenAI realtime model with a built-in voice (no separate voice/transcriber).
const url = 'https://api.meetstream.ai/api/v1/mia';
const options = {
  method: 'POST',
  // Authorization header format is `Token <your_api_key>`.
  headers: {Authorization: '<apiKey>', 'Content-Type': 'application/json'},
  body: '{"agent_name":"New MIA Agent","mode":"realtime","model":{"provider":"openai","model":"gpt-realtime-mini","system_prompt":"You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.","first_message":"Hey there! I am your AI assistant, ready to help with your meeting.","temperature":0.8,"voice":"coral","modalities":["text","audio"],"max_response_output_tokens":200},"agent":{"tools":[],"preemptive_generation":false,"user_away_timeout":15,"interruptions":{"min_duration_seconds":0.5,"word_threshold":0},"false_interruption_timeout":2,"vad_type":"server_vad","enable_interruptions":true,"resume_false_interruption":true,"tools_enabled":false,"vad_threshold":0.5,"vad_prefix_padding_ms":0,"vad_silence_duration_ms":200},"audio":{"sample_rate":24000,"num_channels":1}}'
};

try {
  // On success (201) the body contains message, agent_config_id and agent_config.
  const response = await fetch(url, options);
  const data = await response.json();
  console.log(data);
} catch (error) {
  console.error(error);
}
```

```go Realtime - OpenAI
package main

import (
	"fmt"
	"strings"
	"net/http"
	"io"
)

func main() {

	// POST /api/v1/mia — create a MIA agent configuration.
	url := "https://api.meetstream.ai/api/v1/mia"

	// "realtime" mode payload: a single OpenAI realtime model with a built-in
	// voice; VAD/interruption behavior is tuned under "agent".
	payload := strings.NewReader("{\n  \"agent_name\": \"New MIA Agent\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-realtime-mini\",\n    \"system_prompt\": \"You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.\",\n    \"first_message\": \"Hey there! I am your AI assistant, ready to help with your meeting.\",\n    \"temperature\": 0.8,\n    \"voice\": \"coral\",\n    \"modalities\": [\n      \"text\",\n      \"audio\"\n    ],\n    \"max_response_output_tokens\": 200\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 0\n    },\n    \"false_interruption_timeout\": 2,\n    \"vad_type\": \"server_vad\",\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true,\n    \"tools_enabled\": false,\n    \"vad_threshold\": 0.5,\n    \"vad_prefix_padding_ms\": 0,\n    \"vad_silence_duration_ms\": 200\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}")

	req, _ := http.NewRequest("POST", url, payload)

	// Authorization header format is `Token <your_api_key>`.
	req.Header.Add("Authorization", "<apiKey>")
	req.Header.Add("Content-Type", "application/json")

	res, _ := http.DefaultClient.Do(req)

	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)

	// On success (201) the body contains message, agent_config_id and agent_config.
	fmt.Println(res)
	fmt.Println(string(body))

}
```

```ruby Realtime - OpenAI
require 'uri'
require 'net/http'

# POST /api/v1/mia — create a "realtime" mode agent config using a single
# OpenAI realtime model with a built-in voice (no separate voice/transcriber).
url = URI("https://api.meetstream.ai/api/v1/mia")

http = Net::HTTP.new(url.host, url.port)
http.use_ssl = true

request = Net::HTTP::Post.new(url)
# Authorization header format is `Token <your_api_key>`.
request["Authorization"] = '<apiKey>'
request["Content-Type"] = 'application/json'
request.body = "{\n  \"agent_name\": \"New MIA Agent\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-realtime-mini\",\n    \"system_prompt\": \"You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.\",\n    \"first_message\": \"Hey there! I am your AI assistant, ready to help with your meeting.\",\n    \"temperature\": 0.8,\n    \"voice\": \"coral\",\n    \"modalities\": [\n      \"text\",\n      \"audio\"\n    ],\n    \"max_response_output_tokens\": 200\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 0\n    },\n    \"false_interruption_timeout\": 2,\n    \"vad_type\": \"server_vad\",\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true,\n    \"tools_enabled\": false,\n    \"vad_threshold\": 0.5,\n    \"vad_prefix_padding_ms\": 0,\n    \"vad_silence_duration_ms\": 200\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}"

# On success (201) the body contains message, agent_config_id and agent_config.
response = http.request(request)
puts response.read_body
```

```java Realtime - OpenAI
import com.mashape.unirest.http.HttpResponse;
import com.mashape.unirest.http.Unirest;

// POST /api/v1/mia — create a "realtime" mode agent config using a single
// OpenAI realtime model with a built-in voice (no separate voice/transcriber).
// Authorization header format is `Token <your_api_key>`; a 201 response
// body contains message, agent_config_id and agent_config.
HttpResponse<String> response = Unirest.post("https://api.meetstream.ai/api/v1/mia")
  .header("Authorization", "<apiKey>")
  .header("Content-Type", "application/json")
  .body("{\n  \"agent_name\": \"New MIA Agent\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-realtime-mini\",\n    \"system_prompt\": \"You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.\",\n    \"first_message\": \"Hey there! I am your AI assistant, ready to help with your meeting.\",\n    \"temperature\": 0.8,\n    \"voice\": \"coral\",\n    \"modalities\": [\n      \"text\",\n      \"audio\"\n    ],\n    \"max_response_output_tokens\": 200\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 0\n    },\n    \"false_interruption_timeout\": 2,\n    \"vad_type\": \"server_vad\",\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true,\n    \"tools_enabled\": false,\n    \"vad_threshold\": 0.5,\n    \"vad_prefix_padding_ms\": 0,\n    \"vad_silence_duration_ms\": 200\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}")
  .asString();
```

```php Realtime - OpenAI
<?php
require_once('vendor/autoload.php');

// POST /api/v1/mia — create a "realtime" mode agent config using a single
// OpenAI realtime model with a built-in voice (no separate voice/transcriber).
$client = new \GuzzleHttp\Client();

// Authorization header format is `Token <your_api_key>`.
$response = $client->request('POST', 'https://api.meetstream.ai/api/v1/mia', [
  'body' => '{
  "agent_name": "New MIA Agent",
  "mode": "realtime",
  "model": {
    "provider": "openai",
    "model": "gpt-realtime-mini",
    "system_prompt": "You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.",
    "first_message": "Hey there! I am your AI assistant, ready to help with your meeting.",
    "temperature": 0.8,
    "voice": "coral",
    "modalities": [
      "text",
      "audio"
    ],
    "max_response_output_tokens": 200
  },
  "agent": {
    "tools": [],
    "preemptive_generation": false,
    "user_away_timeout": 15,
    "interruptions": {
      "min_duration_seconds": 0.5,
      "word_threshold": 0
    },
    "false_interruption_timeout": 2,
    "vad_type": "server_vad",
    "enable_interruptions": true,
    "resume_false_interruption": true,
    "tools_enabled": false,
    "vad_threshold": 0.5,
    "vad_prefix_padding_ms": 0,
    "vad_silence_duration_ms": 200
  },
  "audio": {
    "sample_rate": 24000,
    "num_channels": 1
  }
}',
  'headers' => [
    'Authorization' => '<apiKey>',
    'Content-Type' => 'application/json',
  ],
]);

// On success (201) the body contains message, agent_config_id and agent_config.
echo $response->getBody();
```

```csharp Realtime - OpenAI
using RestSharp;

// POST /api/v1/mia — create a "realtime" mode agent config using a single
// OpenAI realtime model with a built-in voice (no separate voice/transcriber).
var client = new RestClient("https://api.meetstream.ai/api/v1/mia");
var request = new RestRequest(Method.POST);
// Authorization header format is `Token <your_api_key>`.
request.AddHeader("Authorization", "<apiKey>");
request.AddHeader("Content-Type", "application/json");
request.AddParameter("application/json", "{\n  \"agent_name\": \"New MIA Agent\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-realtime-mini\",\n    \"system_prompt\": \"You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.\",\n    \"first_message\": \"Hey there! I am your AI assistant, ready to help with your meeting.\",\n    \"temperature\": 0.8,\n    \"voice\": \"coral\",\n    \"modalities\": [\n      \"text\",\n      \"audio\"\n    ],\n    \"max_response_output_tokens\": 200\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 0\n    },\n    \"false_interruption_timeout\": 2,\n    \"vad_type\": \"server_vad\",\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true,\n    \"tools_enabled\": false,\n    \"vad_threshold\": 0.5,\n    \"vad_prefix_padding_ms\": 0,\n    \"vad_silence_duration_ms\": 200\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}", ParameterType.RequestBody);
// On success (201) the body contains message, agent_config_id and agent_config.
IRestResponse response = client.Execute(request);
```

```swift Realtime - OpenAI
import Foundation

// POST /api/v1/mia — create a "realtime" mode agent config using a single
// OpenAI realtime model with a built-in voice (no separate voice/transcriber).
// Authorization header format is `Token <your_api_key>`.
let headers = [
  "Authorization": "<apiKey>",
  "Content-Type": "application/json"
]

// Request payload (MIACreateConfigRequest): agent_name, mode and model are
// required; VAD/interruption behavior is tuned under "agent".
let parameters: [String: Any] = [
  "agent_name": "New MIA Agent",
  "mode": "realtime",
  "model": [
    "provider": "openai",
    "model": "gpt-realtime-mini",
    "system_prompt": "You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.",
    "first_message": "Hey there! I am your AI assistant, ready to help with your meeting.",
    "temperature": 0.8,
    "voice": "coral",
    "modalities": ["text", "audio"],
    "max_response_output_tokens": 200
  ],
  "agent": [
    "tools": [],
    "preemptive_generation": false,
    "user_away_timeout": 15,
    "interruptions": [
      "min_duration_seconds": 0.5,
      "word_threshold": 0
    ],
    "false_interruption_timeout": 2,
    "vad_type": "server_vad",
    "enable_interruptions": true,
    "resume_false_interruption": true,
    "tools_enabled": false,
    "vad_threshold": 0.5,
    "vad_prefix_padding_ms": 0,
    "vad_silence_duration_ms": 200
  ],
  "audio": [
    "sample_rate": 24000,
    "num_channels": 1
  ]
]

// JSONSerialization.data(withJSONObject:options:) is a throwing API, so the
// call must be marked with `try` (the original snippet omitted it and did
// not compile). `try!` is acceptable here: `parameters` is a literal of
// JSON-encodable values, so a failure would be a programming error.
let postData = try! JSONSerialization.data(withJSONObject: parameters, options: [])

var request = URLRequest(
  url: URL(string: "https://api.meetstream.ai/api/v1/mia")!,
  cachePolicy: .useProtocolCachePolicy,
  timeoutInterval: 10.0
)
request.httpMethod = "POST"
request.allHTTPHeaderFields = headers
request.httpBody = postData

// On success the API responds 201 with message, agent_config_id and agent_config.
let dataTask = URLSession.shared.dataTask(with: request) { data, response, error in
  if let error = error {
    print(error)
  } else if let httpResponse = response as? HTTPURLResponse {
    print(httpResponse)
  }
}

dataTask.resume()
```

```python Realtime - Gemini
import requests

# Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
url = "https://api.meetstream.ai/api/v1/mia"

# Realtime-mode agent backed by Google's native-audio Gemini model,
# with thinking, affective dialog, and proactivity enabled.
model_config = {
    "provider": "google",
    "model": "gemini-2.5-flash-native-audio-preview-12-2025",
    "system_prompt": "You are an expressive, helpful AI meeting assistant with advanced capabilities. Be natural, engaging, and provide value to the conversation.",
    "temperature": 0.8,
    "voice": "Kore",
    "thinking_config": {
        "include_thoughts": False,
        "thinking_budget": 1024,
    },
    "enable_affective_dialog": True,
    "proactivity": True,
    "disable_automatic_activity_detection": True,
}

agent_config = {
    "preemptive_generation": False,
    "user_away_timeout": 15,
}

audio_config = {
    "sample_rate": 24000,
    "num_channels": 1,
}

payload = {
    "agent_name": "Gemini Full MIA",
    "mode": "realtime",
    "model": model_config,
    "agent": agent_config,
    "audio": audio_config,
}

# Authorization format: "Token <your_api_key>".
headers = {
    "Authorization": "<apiKey>",
    "Content-Type": "application/json",
}

response = requests.post(url, json=payload, headers=headers)

print(response.json())
```

```javascript Realtime - Gemini
const url = 'https://api.meetstream.ai/api/v1/mia';

// Build the body from a plain object instead of a hand-written JSON string;
// JSON.stringify preserves key insertion order, so the payload bytes are unchanged.
const payload = {
  agent_name: 'Gemini Full MIA',
  mode: 'realtime',
  model: {
    provider: 'google',
    model: 'gemini-2.5-flash-native-audio-preview-12-2025',
    system_prompt:
      'You are an expressive, helpful AI meeting assistant with advanced capabilities. Be natural, engaging, and provide value to the conversation.',
    temperature: 0.8,
    voice: 'Kore',
    thinking_config: {include_thoughts: false, thinking_budget: 1024},
    enable_affective_dialog: true,
    proactivity: true,
    disable_automatic_activity_detection: true
  },
  agent: {preemptive_generation: false, user_away_timeout: 15},
  audio: {sample_rate: 24000, num_channels: 1}
};

// Authorization format: "Token <your_api_key>".
const options = {
  method: 'POST',
  headers: {Authorization: '<apiKey>', 'Content-Type': 'application/json'},
  body: JSON.stringify(payload)
};

try {
  const response = await fetch(url, options);
  const data = await response.json();
  console.log(data);
} catch (error) {
  console.error(error);
}
```

```go Realtime - Gemini
package main

import (
	"fmt"
	"strings"
	"net/http"
	"io"
)

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
func main() {

	url := "https://api.meetstream.ai/api/v1/mia"

	payload := strings.NewReader("{\n  \"agent_name\": \"Gemini Full MIA\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"google\",\n    \"model\": \"gemini-2.5-flash-native-audio-preview-12-2025\",\n    \"system_prompt\": \"You are an expressive, helpful AI meeting assistant with advanced capabilities. Be natural, engaging, and provide value to the conversation.\",\n    \"temperature\": 0.8,\n    \"voice\": \"Kore\",\n    \"thinking_config\": {\n      \"include_thoughts\": false,\n      \"thinking_budget\": 1024\n    },\n    \"enable_affective_dialog\": true,\n    \"proactivity\": true,\n    \"disable_automatic_activity_detection\": true\n  },\n  \"agent\": {\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}")

	req, err := http.NewRequest("POST", url, payload)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Authorization format: "Token <your_api_key>".
	req.Header.Add("Authorization", "<apiKey>")
	req.Header.Add("Content-Type", "application/json")

	// Checking the error prevents a nil-pointer panic on res.Body below
	// when the request fails (DNS error, timeout, etc.).
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer res.Body.Close()

	body, err := io.ReadAll(res.Body)
	if err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(res)
	fmt.Println(string(body))

}
```

```ruby Realtime - Gemini
require 'uri'
require 'net/http'

# Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
url = URI("https://api.meetstream.ai/api/v1/mia")

http = Net::HTTP.new(url.host, url.port)
# The API is HTTPS-only; without this the request is sent in plaintext and fails.
http.use_ssl = true

request = Net::HTTP::Post.new(url)
# Authorization format: "Token <your_api_key>".
request["Authorization"] = '<apiKey>'
request["Content-Type"] = 'application/json'
# Raw JSON payload: realtime Gemini agent with thinking config,
# affective dialog, and proactivity enabled.
request.body = "{\n  \"agent_name\": \"Gemini Full MIA\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"google\",\n    \"model\": \"gemini-2.5-flash-native-audio-preview-12-2025\",\n    \"system_prompt\": \"You are an expressive, helpful AI meeting assistant with advanced capabilities. Be natural, engaging, and provide value to the conversation.\",\n    \"temperature\": 0.8,\n    \"voice\": \"Kore\",\n    \"thinking_config\": {\n      \"include_thoughts\": false,\n      \"thinking_budget\": 1024\n    },\n    \"enable_affective_dialog\": true,\n    \"proactivity\": true,\n    \"disable_automatic_activity_detection\": true\n  },\n  \"agent\": {\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}"

response = http.request(request)
puts response.read_body
```

```java Realtime - Gemini
import com.mashape.unirest.http.HttpResponse;
import com.mashape.unirest.http.Unirest;

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
// using Unirest's fluent API. Authorization format: "Token <your_api_key>".
// Body is a raw JSON string: realtime Gemini agent with thinking config,
// affective dialog, and proactivity enabled.
HttpResponse<String> response = Unirest.post("https://api.meetstream.ai/api/v1/mia")
  .header("Authorization", "<apiKey>")
  .header("Content-Type", "application/json")
  .body("{\n  \"agent_name\": \"Gemini Full MIA\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"google\",\n    \"model\": \"gemini-2.5-flash-native-audio-preview-12-2025\",\n    \"system_prompt\": \"You are an expressive, helpful AI meeting assistant with advanced capabilities. Be natural, engaging, and provide value to the conversation.\",\n    \"temperature\": 0.8,\n    \"voice\": \"Kore\",\n    \"thinking_config\": {\n      \"include_thoughts\": false,\n      \"thinking_budget\": 1024\n    },\n    \"enable_affective_dialog\": true,\n    \"proactivity\": true,\n    \"disable_automatic_activity_detection\": true\n  },\n  \"agent\": {\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}")
  .asString();
```

```php Realtime - Gemini
<?php
require_once('vendor/autoload.php');

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia via Guzzle.
$client = new \GuzzleHttp\Client();

// Body is a raw JSON string: realtime Gemini agent with thinking config,
// affective dialog, and proactivity enabled.
// Authorization format: "Token <your_api_key>".
$response = $client->request('POST', 'https://api.meetstream.ai/api/v1/mia', [
  'body' => '{
  "agent_name": "Gemini Full MIA",
  "mode": "realtime",
  "model": {
    "provider": "google",
    "model": "gemini-2.5-flash-native-audio-preview-12-2025",
    "system_prompt": "You are an expressive, helpful AI meeting assistant with advanced capabilities. Be natural, engaging, and provide value to the conversation.",
    "temperature": 0.8,
    "voice": "Kore",
    "thinking_config": {
      "include_thoughts": false,
      "thinking_budget": 1024
    },
    "enable_affective_dialog": true,
    "proactivity": true,
    "disable_automatic_activity_detection": true
  },
  "agent": {
    "preemptive_generation": false,
    "user_away_timeout": 15
  },
  "audio": {
    "sample_rate": 24000,
    "num_channels": 1
  }
}',
  'headers' => [
    'Authorization' => '<apiKey>',
    'Content-Type' => 'application/json',
  ],
]);

echo $response->getBody();
```

```csharp Realtime - Gemini
using RestSharp;

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
// using RestSharp's legacy (pre-v107) API.
var client = new RestClient("https://api.meetstream.ai/api/v1/mia");
var request = new RestRequest(Method.POST);
// Authorization format: "Token <your_api_key>".
request.AddHeader("Authorization", "<apiKey>");
request.AddHeader("Content-Type", "application/json");
// Body is a raw JSON string: realtime Gemini agent with thinking config,
// affective dialog, and proactivity enabled.
request.AddParameter("application/json", "{\n  \"agent_name\": \"Gemini Full MIA\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"google\",\n    \"model\": \"gemini-2.5-flash-native-audio-preview-12-2025\",\n    \"system_prompt\": \"You are an expressive, helpful AI meeting assistant with advanced capabilities. Be natural, engaging, and provide value to the conversation.\",\n    \"temperature\": 0.8,\n    \"voice\": \"Kore\",\n    \"thinking_config\": {\n      \"include_thoughts\": false,\n      \"thinking_budget\": 1024\n    },\n    \"enable_affective_dialog\": true,\n    \"proactivity\": true,\n    \"disable_automatic_activity_detection\": true\n  },\n  \"agent\": {\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}", ParameterType.RequestBody);
IRestResponse response = client.Execute(request);
```

```swift Realtime - Gemini
import Foundation

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
// Authorization format: "Token <your_api_key>".
let headers = [
  "Authorization": "<apiKey>",
  "Content-Type": "application/json"
]
let parameters = [
  "agent_name": "Gemini Full MIA",
  "mode": "realtime",
  "model": [
    "provider": "google",
    "model": "gemini-2.5-flash-native-audio-preview-12-2025",
    "system_prompt": "You are an expressive, helpful AI meeting assistant with advanced capabilities. Be natural, engaging, and provide value to the conversation.",
    "temperature": 0.8,
    "voice": "Kore",
    "thinking_config": [
      "include_thoughts": false,
      "thinking_budget": 1024
    ],
    "enable_affective_dialog": true,
    "proactivity": true,
    "disable_automatic_activity_detection": true
  ],
  "agent": [
    "preemptive_generation": false,
    "user_away_timeout": 15
  ],
  "audio": [
    "sample_rate": 24000,
    "num_channels": 1
  ]
] as [String : Any]

// JSONSerialization.data(withJSONObject:) throws, so the call must be
// marked with `try` — without it this sample does not compile.
guard let postData = try? JSONSerialization.data(withJSONObject: parameters, options: []) else {
  fatalError("Failed to encode request payload as JSON")
}

var request = URLRequest(url: URL(string: "https://api.meetstream.ai/api/v1/mia")!,
                         cachePolicy: .useProtocolCachePolicy,
                         timeoutInterval: 10.0)
request.httpMethod = "POST"
request.allHTTPHeaderFields = headers
request.httpBody = postData

let dataTask = URLSession.shared.dataTask(with: request) { data, response, error in
  if let error = error {
    print(error)
  } else if let httpResponse = response as? HTTPURLResponse {
    // A successful create returns HTTP 201.
    print(httpResponse)
  }
}

dataTask.resume()
```

```python Realtime - xAI Grok
import requests

# Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
url = "https://api.meetstream.ai/api/v1/mia"

# Realtime-mode agent backed by xAI Grok's "Ara" voice.
model_config = {
    "provider": "xai",
    "system_prompt": "You are a helpful AI meeting assistant. Keep responses concise and natural.",
    "first_message": "Hey there! I am your AI assistant, ready to help with your meeting.",
    "voice": "Ara",
}

# Interruption handling tuned for quick barge-in recovery.
agent_config = {
    "tools": [],
    "preemptive_generation": False,
    "user_away_timeout": 15,
    "interruptions": {
        "min_duration_seconds": 0.5,
        "word_threshold": 0,
    },
    "false_interruption_timeout": 2,
    "enable_interruptions": True,
    "resume_false_interruption": True,
}

audio_config = {
    "sample_rate": 24000,
    "num_channels": 1,
}

payload = {
    "agent_name": "MIA xAI Agent",
    "mode": "realtime",
    "model": model_config,
    "agent": agent_config,
    "audio": audio_config,
}

# Authorization format: "Token <your_api_key>".
headers = {
    "Authorization": "<apiKey>",
    "Content-Type": "application/json",
}

response = requests.post(url, json=payload, headers=headers)

print(response.json())
```

```javascript Realtime - xAI Grok
const url = 'https://api.meetstream.ai/api/v1/mia';

// Build the body from a plain object instead of a hand-written JSON string;
// JSON.stringify preserves key insertion order, so the payload bytes are unchanged.
const payload = {
  agent_name: 'MIA xAI Agent',
  mode: 'realtime',
  model: {
    provider: 'xai',
    system_prompt:
      'You are a helpful AI meeting assistant. Keep responses concise and natural.',
    first_message:
      'Hey there! I am your AI assistant, ready to help with your meeting.',
    voice: 'Ara'
  },
  agent: {
    tools: [],
    preemptive_generation: false,
    user_away_timeout: 15,
    interruptions: {min_duration_seconds: 0.5, word_threshold: 0},
    false_interruption_timeout: 2,
    enable_interruptions: true,
    resume_false_interruption: true
  },
  audio: {sample_rate: 24000, num_channels: 1}
};

// Authorization format: "Token <your_api_key>".
const options = {
  method: 'POST',
  headers: {Authorization: '<apiKey>', 'Content-Type': 'application/json'},
  body: JSON.stringify(payload)
};

try {
  const response = await fetch(url, options);
  const data = await response.json();
  console.log(data);
} catch (error) {
  console.error(error);
}
```

```go Realtime - xAI Grok
package main

import (
	"fmt"
	"strings"
	"net/http"
	"io"
)

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
func main() {

	url := "https://api.meetstream.ai/api/v1/mia"

	payload := strings.NewReader("{\n  \"agent_name\": \"MIA xAI Agent\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"xai\",\n    \"system_prompt\": \"You are a helpful AI meeting assistant. Keep responses concise and natural.\",\n    \"first_message\": \"Hey there! I am your AI assistant, ready to help with your meeting.\",\n    \"voice\": \"Ara\"\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 0\n    },\n    \"false_interruption_timeout\": 2,\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}")

	req, err := http.NewRequest("POST", url, payload)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Authorization format: "Token <your_api_key>".
	req.Header.Add("Authorization", "<apiKey>")
	req.Header.Add("Content-Type", "application/json")

	// Checking the error prevents a nil-pointer panic on res.Body below
	// when the request fails (DNS error, timeout, etc.).
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer res.Body.Close()

	body, err := io.ReadAll(res.Body)
	if err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(res)
	fmt.Println(string(body))

}
```

```ruby Realtime - xAI Grok
require 'uri'
require 'net/http'

# Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
url = URI("https://api.meetstream.ai/api/v1/mia")

http = Net::HTTP.new(url.host, url.port)
# The API is HTTPS-only; without this the request is sent in plaintext and fails.
http.use_ssl = true

request = Net::HTTP::Post.new(url)
# Authorization format: "Token <your_api_key>".
request["Authorization"] = '<apiKey>'
request["Content-Type"] = 'application/json'
# Raw JSON payload: realtime xAI Grok agent with interruption handling enabled.
request.body = "{\n  \"agent_name\": \"MIA xAI Agent\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"xai\",\n    \"system_prompt\": \"You are a helpful AI meeting assistant. Keep responses concise and natural.\",\n    \"first_message\": \"Hey there! I am your AI assistant, ready to help with your meeting.\",\n    \"voice\": \"Ara\"\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 0\n    },\n    \"false_interruption_timeout\": 2,\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}"

response = http.request(request)
puts response.read_body
```

```java Realtime - xAI Grok
import com.mashape.unirest.http.HttpResponse;
import com.mashape.unirest.http.Unirest;

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
// using Unirest's fluent API. Authorization format: "Token <your_api_key>".
// Body is a raw JSON string: realtime xAI Grok agent with interruption handling.
HttpResponse<String> response = Unirest.post("https://api.meetstream.ai/api/v1/mia")
  .header("Authorization", "<apiKey>")
  .header("Content-Type", "application/json")
  .body("{\n  \"agent_name\": \"MIA xAI Agent\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"xai\",\n    \"system_prompt\": \"You are a helpful AI meeting assistant. Keep responses concise and natural.\",\n    \"first_message\": \"Hey there! I am your AI assistant, ready to help with your meeting.\",\n    \"voice\": \"Ara\"\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 0\n    },\n    \"false_interruption_timeout\": 2,\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}")
  .asString();
```

```php Realtime - xAI Grok
<?php
require_once('vendor/autoload.php');

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia via Guzzle.
$client = new \GuzzleHttp\Client();

// Body is a raw JSON string: realtime xAI Grok agent with interruption handling.
// Authorization format: "Token <your_api_key>".
$response = $client->request('POST', 'https://api.meetstream.ai/api/v1/mia', [
  'body' => '{
  "agent_name": "MIA xAI Agent",
  "mode": "realtime",
  "model": {
    "provider": "xai",
    "system_prompt": "You are a helpful AI meeting assistant. Keep responses concise and natural.",
    "first_message": "Hey there! I am your AI assistant, ready to help with your meeting.",
    "voice": "Ara"
  },
  "agent": {
    "tools": [],
    "preemptive_generation": false,
    "user_away_timeout": 15,
    "interruptions": {
      "min_duration_seconds": 0.5,
      "word_threshold": 0
    },
    "false_interruption_timeout": 2,
    "enable_interruptions": true,
    "resume_false_interruption": true
  },
  "audio": {
    "sample_rate": 24000,
    "num_channels": 1
  }
}',
  'headers' => [
    'Authorization' => '<apiKey>',
    'Content-Type' => 'application/json',
  ],
]);

echo $response->getBody();
```

```csharp Realtime - xAI Grok
using RestSharp;

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
// using RestSharp's legacy (pre-v107) API.
var client = new RestClient("https://api.meetstream.ai/api/v1/mia");
var request = new RestRequest(Method.POST);
// Authorization format: "Token <your_api_key>".
request.AddHeader("Authorization", "<apiKey>");
request.AddHeader("Content-Type", "application/json");
// Body is a raw JSON string: realtime xAI Grok agent with interruption handling.
request.AddParameter("application/json", "{\n  \"agent_name\": \"MIA xAI Agent\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"xai\",\n    \"system_prompt\": \"You are a helpful AI meeting assistant. Keep responses concise and natural.\",\n    \"first_message\": \"Hey there! I am your AI assistant, ready to help with your meeting.\",\n    \"voice\": \"Ara\"\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 0\n    },\n    \"false_interruption_timeout\": 2,\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  }\n}", ParameterType.RequestBody);
IRestResponse response = client.Execute(request);
```

```swift Realtime - xAI Grok
import Foundation

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
// Authorization format: "Token <your_api_key>".
let headers = [
  "Authorization": "<apiKey>",
  "Content-Type": "application/json"
]
let parameters = [
  "agent_name": "MIA xAI Agent",
  "mode": "realtime",
  "model": [
    "provider": "xai",
    "system_prompt": "You are a helpful AI meeting assistant. Keep responses concise and natural.",
    "first_message": "Hey there! I am your AI assistant, ready to help with your meeting.",
    "voice": "Ara"
  ],
  "agent": [
    "tools": [],
    "preemptive_generation": false,
    "user_away_timeout": 15,
    "interruptions": [
      "min_duration_seconds": 0.5,
      "word_threshold": 0
    ],
    "false_interruption_timeout": 2,
    "enable_interruptions": true,
    "resume_false_interruption": true
  ],
  "audio": [
    "sample_rate": 24000,
    "num_channels": 1
  ]
] as [String : Any]

// JSONSerialization.data(withJSONObject:) throws, so the call must be
// marked with `try` — without it this sample does not compile.
guard let postData = try? JSONSerialization.data(withJSONObject: parameters, options: []) else {
  fatalError("Failed to encode request payload as JSON")
}

var request = URLRequest(url: URL(string: "https://api.meetstream.ai/api/v1/mia")!,
                         cachePolicy: .useProtocolCachePolicy,
                         timeoutInterval: 10.0)
request.httpMethod = "POST"
request.allHTTPHeaderFields = headers
request.httpBody = postData

let dataTask = URLSession.shared.dataTask(with: request) { data, response, error in
  if let error = error {
    print(error)
  } else if let httpResponse = response as? HTTPURLResponse {
    // A successful create returns HTTP 201.
    print(httpResponse)
  }
}

dataTask.resume()
```

```python Pipeline - OpenAI + OpenAI + Deepgram
import requests

# Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
url = "https://api.meetstream.ai/api/v1/mia"

# Pipeline mode wires three providers together: an LLM (model),
# a TTS voice, and a speech-to-text transcriber.
model_config = {
    "provider": "openai",
    "model": "gpt-5-nano",
    "system_prompt": "Hi",
    "first_message": "Hi",
    "temperature": 0.8,
    "max_tokens": 150,
    "top_p": 1,
    "frequency_penalty": 0,
    "presence_penalty": 0,
}

voice_config = {
    "provider": "openai",
    "model": "tts-1",
    "voice_id": "alloy",
    "speed": 1,
}

# boostwords bias the transcriber toward domain-specific phrases.
transcriber_config = {
    "provider": "deepgram",
    "model": "nova-2",
    "language": "en",
    "boostwords": ["hey assistant", "MeetStream", "LiveKit"],
}

# VAD / endpointing parameters controlling turn-taking and barge-in.
agent_config = {
    "tools": [],
    "preemptive_generation": False,
    "user_away_timeout": 15,
    "interruption_mode": "adaptive",
    "interruptions": {
        "min_duration_seconds": 0.5,
        "word_threshold": 2,
    },
    "false_interruption_timeout": 2,
    "enable_interruptions": True,
    "resume_false_interruption": True,
    "turn_detection": "vad",
    "vad_activation_threshold": 0.5,
    "vad_min_silence_duration_ms": 550,
    "vad_min_speech_duration_ms": 50,
    "vad_prefix_padding_duration_ms": 300,
    "endpointing_mode": "fixed",
    "min_endpointing_delay": 0.5,
    "max_endpointing_delay": 3,
}

payload = {
    "agent_name": "V's Pipeline Agent",
    "mode": "pipeline",
    "model": model_config,
    "voice": voice_config,
    "transcriber": transcriber_config,
    "agent": agent_config,
    "audio": {
        "sample_rate": 24000,
        "num_channels": 1,
    },
    # The agent only responds after hearing a wake word, for 30 s at a time.
    "wake_word": {
        "enabled": True,
        "words": ["hello"],
        "timeout": 30,
    },
}

# Authorization format: "Token <your_api_key>".
headers = {
    "Authorization": "<apiKey>",
    "Content-Type": "application/json",
}

response = requests.post(url, json=payload, headers=headers)

print(response.json())
```

```javascript Pipeline - OpenAI + OpenAI + Deepgram
const url = 'https://api.meetstream.ai/api/v1/mia';

// Build the body from a plain object instead of a hand-written JSON string;
// JSON.stringify preserves key insertion order, so the payload bytes are unchanged.
const payload = {
  agent_name: "V's Pipeline Agent",
  mode: 'pipeline',
  model: {
    provider: 'openai',
    model: 'gpt-5-nano',
    system_prompt: 'Hi',
    first_message: 'Hi',
    temperature: 0.8,
    max_tokens: 150,
    top_p: 1,
    frequency_penalty: 0,
    presence_penalty: 0
  },
  voice: {provider: 'openai', model: 'tts-1', voice_id: 'alloy', speed: 1},
  transcriber: {
    provider: 'deepgram',
    model: 'nova-2',
    language: 'en',
    boostwords: ['hey assistant', 'MeetStream', 'LiveKit']
  },
  agent: {
    tools: [],
    preemptive_generation: false,
    user_away_timeout: 15,
    interruption_mode: 'adaptive',
    interruptions: {min_duration_seconds: 0.5, word_threshold: 2},
    false_interruption_timeout: 2,
    enable_interruptions: true,
    resume_false_interruption: true,
    turn_detection: 'vad',
    vad_activation_threshold: 0.5,
    vad_min_silence_duration_ms: 550,
    vad_min_speech_duration_ms: 50,
    vad_prefix_padding_duration_ms: 300,
    endpointing_mode: 'fixed',
    min_endpointing_delay: 0.5,
    max_endpointing_delay: 3
  },
  audio: {sample_rate: 24000, num_channels: 1},
  wake_word: {enabled: true, words: ['hello'], timeout: 30}
};

// Authorization format: "Token <your_api_key>".
const options = {
  method: 'POST',
  headers: {Authorization: '<apiKey>', 'Content-Type': 'application/json'},
  body: JSON.stringify(payload)
};

try {
  const response = await fetch(url, options);
  const data = await response.json();
  console.log(data);
} catch (error) {
  console.error(error);
}
```

```go Pipeline - OpenAI + OpenAI + Deepgram
package main

import (
	"fmt"
	"strings"
	"net/http"
	"io"
)

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
func main() {

	url := "https://api.meetstream.ai/api/v1/mia"

	payload := strings.NewReader("{\n  \"agent_name\": \"V's Pipeline Agent\",\n  \"mode\": \"pipeline\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-5-nano\",\n    \"system_prompt\": \"Hi\",\n    \"first_message\": \"Hi\",\n    \"temperature\": 0.8,\n    \"max_tokens\": 150,\n    \"top_p\": 1,\n    \"frequency_penalty\": 0,\n    \"presence_penalty\": 0\n  },\n  \"voice\": {\n    \"provider\": \"openai\",\n    \"model\": \"tts-1\",\n    \"voice_id\": \"alloy\",\n    \"speed\": 1\n  },\n  \"transcriber\": {\n    \"provider\": \"deepgram\",\n    \"model\": \"nova-2\",\n    \"language\": \"en\",\n    \"boostwords\": [\n      \"hey assistant\",\n      \"MeetStream\",\n      \"LiveKit\"\n    ]\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruption_mode\": \"adaptive\",\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 2\n    },\n    \"false_interruption_timeout\": 2,\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true,\n    \"turn_detection\": \"vad\",\n    \"vad_activation_threshold\": 0.5,\n    \"vad_min_silence_duration_ms\": 550,\n    \"vad_min_speech_duration_ms\": 50,\n    \"vad_prefix_padding_duration_ms\": 300,\n    \"endpointing_mode\": \"fixed\",\n    \"min_endpointing_delay\": 0.5,\n    \"max_endpointing_delay\": 3\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  },\n  \"wake_word\": {\n    \"enabled\": true,\n    \"words\": [\n      \"hello\"\n    ],\n    \"timeout\": 30\n  }\n}")

	req, err := http.NewRequest("POST", url, payload)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Authorization format: "Token <your_api_key>".
	req.Header.Add("Authorization", "<apiKey>")
	req.Header.Add("Content-Type", "application/json")

	// Checking the error prevents a nil-pointer panic on res.Body below
	// when the request fails (DNS error, timeout, etc.).
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer res.Body.Close()

	body, err := io.ReadAll(res.Body)
	if err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(res)
	fmt.Println(string(body))

}
```

```ruby Pipeline - OpenAI + OpenAI + Deepgram
require 'uri'
require 'net/http'

# Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
url = URI("https://api.meetstream.ai/api/v1/mia")

http = Net::HTTP.new(url.host, url.port)
# The API is HTTPS-only; without this the request is sent in plaintext and fails.
http.use_ssl = true

request = Net::HTTP::Post.new(url)
# Authorization format: "Token <your_api_key>".
request["Authorization"] = '<apiKey>'
request["Content-Type"] = 'application/json'
# Raw JSON payload: pipeline-mode agent combining an OpenAI LLM, OpenAI TTS,
# and a Deepgram transcriber, with VAD turn detection and a wake word.
request.body = "{\n  \"agent_name\": \"V's Pipeline Agent\",\n  \"mode\": \"pipeline\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-5-nano\",\n    \"system_prompt\": \"Hi\",\n    \"first_message\": \"Hi\",\n    \"temperature\": 0.8,\n    \"max_tokens\": 150,\n    \"top_p\": 1,\n    \"frequency_penalty\": 0,\n    \"presence_penalty\": 0\n  },\n  \"voice\": {\n    \"provider\": \"openai\",\n    \"model\": \"tts-1\",\n    \"voice_id\": \"alloy\",\n    \"speed\": 1\n  },\n  \"transcriber\": {\n    \"provider\": \"deepgram\",\n    \"model\": \"nova-2\",\n    \"language\": \"en\",\n    \"boostwords\": [\n      \"hey assistant\",\n      \"MeetStream\",\n      \"LiveKit\"\n    ]\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruption_mode\": \"adaptive\",\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 2\n    },\n    \"false_interruption_timeout\": 2,\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true,\n    \"turn_detection\": \"vad\",\n    \"vad_activation_threshold\": 0.5,\n    \"vad_min_silence_duration_ms\": 550,\n    \"vad_min_speech_duration_ms\": 50,\n    \"vad_prefix_padding_duration_ms\": 300,\n    \"endpointing_mode\": \"fixed\",\n    \"min_endpointing_delay\": 0.5,\n    \"max_endpointing_delay\": 3\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  },\n  \"wake_word\": {\n    \"enabled\": true,\n    \"words\": [\n      \"hello\"\n    ],\n    \"timeout\": 30\n  }\n}"

response = http.request(request)
puts response.read_body
```

```java Pipeline - OpenAI + OpenAI + Deepgram
import com.mashape.unirest.http.HttpResponse;
import com.mashape.unirest.http.Unirest;

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
// using Unirest's fluent API. Authorization format: "Token <your_api_key>".
// Body is a raw JSON string: pipeline-mode agent combining an OpenAI LLM,
// OpenAI TTS, and a Deepgram transcriber, with VAD turn detection and a wake word.
HttpResponse<String> response = Unirest.post("https://api.meetstream.ai/api/v1/mia")
  .header("Authorization", "<apiKey>")
  .header("Content-Type", "application/json")
  .body("{\n  \"agent_name\": \"V's Pipeline Agent\",\n  \"mode\": \"pipeline\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-5-nano\",\n    \"system_prompt\": \"Hi\",\n    \"first_message\": \"Hi\",\n    \"temperature\": 0.8,\n    \"max_tokens\": 150,\n    \"top_p\": 1,\n    \"frequency_penalty\": 0,\n    \"presence_penalty\": 0\n  },\n  \"voice\": {\n    \"provider\": \"openai\",\n    \"model\": \"tts-1\",\n    \"voice_id\": \"alloy\",\n    \"speed\": 1\n  },\n  \"transcriber\": {\n    \"provider\": \"deepgram\",\n    \"model\": \"nova-2\",\n    \"language\": \"en\",\n    \"boostwords\": [\n      \"hey assistant\",\n      \"MeetStream\",\n      \"LiveKit\"\n    ]\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruption_mode\": \"adaptive\",\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 2\n    },\n    \"false_interruption_timeout\": 2,\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true,\n    \"turn_detection\": \"vad\",\n    \"vad_activation_threshold\": 0.5,\n    \"vad_min_silence_duration_ms\": 550,\n    \"vad_min_speech_duration_ms\": 50,\n    \"vad_prefix_padding_duration_ms\": 300,\n    \"endpointing_mode\": \"fixed\",\n    \"min_endpointing_delay\": 0.5,\n    \"max_endpointing_delay\": 3\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  },\n  \"wake_word\": {\n    \"enabled\": true,\n    \"words\": [\n      \"hello\"\n    ],\n    \"timeout\": 30\n  }\n}")
  .asString();
```

```php Pipeline - OpenAI + OpenAI + Deepgram
<?php
require_once('vendor/autoload.php');

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia via Guzzle.
$client = new \GuzzleHttp\Client();

// Body is a raw JSON string: pipeline-mode agent combining an OpenAI LLM,
// OpenAI TTS, and a Deepgram transcriber, with VAD turn detection and a wake word.
// Authorization format: "Token <your_api_key>".
$response = $client->request('POST', 'https://api.meetstream.ai/api/v1/mia', [
  'body' => '{
  "agent_name": "V\'s Pipeline Agent",
  "mode": "pipeline",
  "model": {
    "provider": "openai",
    "model": "gpt-5-nano",
    "system_prompt": "Hi",
    "first_message": "Hi",
    "temperature": 0.8,
    "max_tokens": 150,
    "top_p": 1,
    "frequency_penalty": 0,
    "presence_penalty": 0
  },
  "voice": {
    "provider": "openai",
    "model": "tts-1",
    "voice_id": "alloy",
    "speed": 1
  },
  "transcriber": {
    "provider": "deepgram",
    "model": "nova-2",
    "language": "en",
    "boostwords": [
      "hey assistant",
      "MeetStream",
      "LiveKit"
    ]
  },
  "agent": {
    "tools": [],
    "preemptive_generation": false,
    "user_away_timeout": 15,
    "interruption_mode": "adaptive",
    "interruptions": {
      "min_duration_seconds": 0.5,
      "word_threshold": 2
    },
    "false_interruption_timeout": 2,
    "enable_interruptions": true,
    "resume_false_interruption": true,
    "turn_detection": "vad",
    "vad_activation_threshold": 0.5,
    "vad_min_silence_duration_ms": 550,
    "vad_min_speech_duration_ms": 50,
    "vad_prefix_padding_duration_ms": 300,
    "endpointing_mode": "fixed",
    "min_endpointing_delay": 0.5,
    "max_endpointing_delay": 3
  },
  "audio": {
    "sample_rate": 24000,
    "num_channels": 1
  },
  "wake_word": {
    "enabled": true,
    "words": [
      "hello"
    ],
    "timeout": 30
  }
}',
  'headers' => [
    'Authorization' => '<apiKey>',
    'Content-Type' => 'application/json',
  ],
]);

echo $response->getBody();
```

```csharp Pipeline - OpenAI + OpenAI + Deepgram
using RestSharp;

// Create a MIA agent config: POST https://api.meetstream.ai/api/v1/mia
// using RestSharp's legacy (pre-v107) API.
var client = new RestClient("https://api.meetstream.ai/api/v1/mia");
var request = new RestRequest(Method.POST);
// Authorization format: "Token <your_api_key>".
request.AddHeader("Authorization", "<apiKey>");
request.AddHeader("Content-Type", "application/json");
// Body is a raw JSON string: pipeline-mode agent combining an OpenAI LLM,
// OpenAI TTS, and a Deepgram transcriber, with VAD turn detection and a wake word.
request.AddParameter("application/json", "{\n  \"agent_name\": \"V's Pipeline Agent\",\n  \"mode\": \"pipeline\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-5-nano\",\n    \"system_prompt\": \"Hi\",\n    \"first_message\": \"Hi\",\n    \"temperature\": 0.8,\n    \"max_tokens\": 150,\n    \"top_p\": 1,\n    \"frequency_penalty\": 0,\n    \"presence_penalty\": 0\n  },\n  \"voice\": {\n    \"provider\": \"openai\",\n    \"model\": \"tts-1\",\n    \"voice_id\": \"alloy\",\n    \"speed\": 1\n  },\n  \"transcriber\": {\n    \"provider\": \"deepgram\",\n    \"model\": \"nova-2\",\n    \"language\": \"en\",\n    \"boostwords\": [\n      \"hey assistant\",\n      \"MeetStream\",\n      \"LiveKit\"\n    ]\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruption_mode\": \"adaptive\",\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 2\n    },\n    \"false_interruption_timeout\": 2,\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true,\n    \"turn_detection\": \"vad\",\n    \"vad_activation_threshold\": 0.5,\n    \"vad_min_silence_duration_ms\": 550,\n    \"vad_min_speech_duration_ms\": 50,\n    \"vad_prefix_padding_duration_ms\": 300,\n    \"endpointing_mode\": \"fixed\",\n    \"min_endpointing_delay\": 0.5,\n    \"max_endpointing_delay\": 3\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  },\n  \"wake_word\": {\n    \"enabled\": true,\n    \"words\": [\n      \"hello\"\n    ],\n    \"timeout\": 30\n  }\n}", ParameterType.RequestBody);
IRestResponse response = client.Execute(request);
```

```swift Pipeline - OpenAI + OpenAI + Deepgram
import Foundation

// Headers for the MIA create-config request.
// Replace <apiKey> with "Token <your_api_key>".
let headers = [
  "Authorization": "<apiKey>",
  "Content-Type": "application/json"
]
// Pipeline agent configuration: OpenAI LLM + OpenAI TTS + Deepgram STT.
let parameters = [
  "agent_name": "V's Pipeline Agent",
  "mode": "pipeline",
  "model": [
    "provider": "openai",
    "model": "gpt-5-nano",
    "system_prompt": "Hi",
    "first_message": "Hi",
    "temperature": 0.8,
    "max_tokens": 150,
    "top_p": 1,
    "frequency_penalty": 0,
    "presence_penalty": 0
  ],
  "voice": [
    "provider": "openai",
    "model": "tts-1",
    "voice_id": "alloy",
    "speed": 1
  ],
  "transcriber": [
    "provider": "deepgram",
    "model": "nova-2",
    "language": "en",
    "boostwords": ["hey assistant", "MeetStream", "LiveKit"]
  ],
  "agent": [
    "tools": [],
    "preemptive_generation": false,
    "user_away_timeout": 15,
    "interruption_mode": "adaptive",
    "interruptions": [
      "min_duration_seconds": 0.5,
      "word_threshold": 2
    ],
    "false_interruption_timeout": 2,
    "enable_interruptions": true,
    "resume_false_interruption": true,
    "turn_detection": "vad",
    "vad_activation_threshold": 0.5,
    "vad_min_silence_duration_ms": 550,
    "vad_min_speech_duration_ms": 50,
    "vad_prefix_padding_duration_ms": 300,
    "endpointing_mode": "fixed",
    "min_endpointing_delay": 0.5,
    "max_endpointing_delay": 3
  ],
  "audio": [
    "sample_rate": 24000,
    "num_channels": 1
  ],
  "wake_word": [
    "enabled": true,
    "words": ["hello"],
    "timeout": 30
  ]
] as [String : Any]

// JSONSerialization.data(withJSONObject:options:) is a throwing API, so the
// call requires `try` — the previous sample omitted it and would not compile.
let postData: Data
do {
  postData = try JSONSerialization.data(withJSONObject: parameters, options: [])
} catch {
  fatalError("Unable to encode request payload as JSON: \(error)")
}

var request = URLRequest(url: URL(string: "https://api.meetstream.ai/api/v1/mia")!,
                         cachePolicy: .useProtocolCachePolicy,
                         timeoutInterval: 10.0)
request.httpMethod = "POST"
request.allHTTPHeaderFields = headers
request.httpBody = postData

// POST the config; a 201 response carries the created agent config.
let dataTask = URLSession.shared.dataTask(with: request) { data, response, error in
  if let error = error {
    print(error)
  } else if let httpResponse = response as? HTTPURLResponse {
    print(httpResponse)
  }
}

dataTask.resume()
```

```python Realtime - Avatar (Anam)
import requests

# Endpoint for creating a MIA agent configuration.
url = "https://api.meetstream.ai/api/v1/mia"

# Replace <apiKey> with "Token <your_api_key>".
headers = {
    "Authorization": "<apiKey>",
    "Content-Type": "application/json",
}

# Realtime agent configuration with an Anam avatar attached.
payload = {
    "agent_name": "New MIA Agent",
    "mode": "realtime",
    "model": {
        "provider": "openai",
        "model": "gpt-realtime-mini",
        "system_prompt": "You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.",
        "first_message": "Hey there! I am your AI assistant, ready to help with your meeting.",
        "temperature": 0.8,
        "voice": "coral",
        "modalities": ["text", "audio"],
        "max_response_output_tokens": 200,
    },
    "agent": {
        "tools": [],
        "preemptive_generation": False,
        "user_away_timeout": 15,
        "interruptions": {
            "min_duration_seconds": 0.5,
            "word_threshold": 0,
        },
        "false_interruption_timeout": 2,
        "vad_type": "server_vad",
        "enable_interruptions": True,
        "resume_false_interruption": True,
        "tools_enabled": False,
        "vad_threshold": 0.5,
        "vad_prefix_padding_ms": 0,
        "vad_silence_duration_ms": 200,
    },
    "audio": {
        "sample_rate": 24000,
        "num_channels": 1,
    },
    "Avatar": {
        "provider": "anam",
        "enabled": True,
        "avatar_id": "2644bbca-ee18-4f66-9e84-9360043c9883",
    },
}

response = requests.post(url, json=payload, headers=headers)

print(response.json())
```

```javascript Realtime - Avatar (Anam)
const url = 'https://api.meetstream.ai/api/v1/mia';

// Realtime agent configuration with an Anam avatar attached.
const payload = {
  agent_name: 'New MIA Agent',
  mode: 'realtime',
  model: {
    provider: 'openai',
    model: 'gpt-realtime-mini',
    system_prompt: 'You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.',
    first_message: 'Hey there! I am your AI assistant, ready to help with your meeting.',
    temperature: 0.8,
    voice: 'coral',
    modalities: ['text', 'audio'],
    max_response_output_tokens: 200
  },
  agent: {
    tools: [],
    preemptive_generation: false,
    user_away_timeout: 15,
    interruptions: {min_duration_seconds: 0.5, word_threshold: 0},
    false_interruption_timeout: 2,
    vad_type: 'server_vad',
    enable_interruptions: true,
    resume_false_interruption: true,
    tools_enabled: false,
    vad_threshold: 0.5,
    vad_prefix_padding_ms: 0,
    vad_silence_duration_ms: 200
  },
  audio: {sample_rate: 24000, num_channels: 1},
  Avatar: {
    provider: 'anam',
    enabled: true,
    avatar_id: '2644bbca-ee18-4f66-9e84-9360043c9883'
  }
};

try {
  // Replace <apiKey> with "Token <your_api_key>".
  const response = await fetch(url, {
    method: 'POST',
    headers: {Authorization: '<apiKey>', 'Content-Type': 'application/json'},
    body: JSON.stringify(payload)
  });
  const data = await response.json();
  console.log(data);
} catch (error) {
  console.error(error);
}
```

```go Realtime - Avatar (Anam)
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"
)

// main creates a realtime MIA agent configuration (Anam avatar enabled)
// by POSTing a JSON payload to the MeetStream API and printing the response.
func main() {

	url := "https://api.meetstream.ai/api/v1/mia"

	payload := strings.NewReader("{\n  \"agent_name\": \"New MIA Agent\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-realtime-mini\",\n    \"system_prompt\": \"You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.\",\n    \"first_message\": \"Hey there! I am your AI assistant, ready to help with your meeting.\",\n    \"temperature\": 0.8,\n    \"voice\": \"coral\",\n    \"modalities\": [\n      \"text\",\n      \"audio\"\n    ],\n    \"max_response_output_tokens\": 200\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 0\n    },\n    \"false_interruption_timeout\": 2,\n    \"vad_type\": \"server_vad\",\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true,\n    \"tools_enabled\": false,\n    \"vad_threshold\": 0.5,\n    \"vad_prefix_padding_ms\": 0,\n    \"vad_silence_duration_ms\": 200\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  },\n  \"Avatar\": {\n    \"provider\": \"anam\",\n    \"enabled\": true,\n    \"avatar_id\": \"2644bbca-ee18-4f66-9e84-9360043c9883\"\n  }\n}")

	req, err := http.NewRequest("POST", url, payload)
	if err != nil {
		log.Fatal(err)
	}

	// Replace <apiKey> with "Token <your_api_key>".
	req.Header.Add("Authorization", "<apiKey>")
	req.Header.Add("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		// Without this check, res is nil on a transport error and the
		// deferred res.Body.Close() below would panic.
		log.Fatal(err)
	}
	defer res.Body.Close()

	body, err := io.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(res)
	fmt.Println(string(body))

}
```

```ruby Realtime - Avatar (Anam)
require 'uri'
require 'net/http'

# Create a realtime MIA agent configuration (Anam avatar enabled).
uri = URI("https://api.meetstream.ai/api/v1/mia")

# start(..., use_ssl: true) opens an HTTPS session and closes it when the block ends.
Net::HTTP.start(uri.host, uri.port, use_ssl: true) do |http|
  post = Net::HTTP::Post.new(uri)
  # Replace <apiKey> with "Token <your_api_key>".
  post["Authorization"] = '<apiKey>'
  post["Content-Type"] = 'application/json'
  post.body = "{\n  \"agent_name\": \"New MIA Agent\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-realtime-mini\",\n    \"system_prompt\": \"You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.\",\n    \"first_message\": \"Hey there! I am your AI assistant, ready to help with your meeting.\",\n    \"temperature\": 0.8,\n    \"voice\": \"coral\",\n    \"modalities\": [\n      \"text\",\n      \"audio\"\n    ],\n    \"max_response_output_tokens\": 200\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 0\n    },\n    \"false_interruption_timeout\": 2,\n    \"vad_type\": \"server_vad\",\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true,\n    \"tools_enabled\": false,\n    \"vad_threshold\": 0.5,\n    \"vad_prefix_padding_ms\": 0,\n    \"vad_silence_duration_ms\": 200\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  },\n  \"Avatar\": {\n    \"provider\": \"anam\",\n    \"enabled\": true,\n    \"avatar_id\": \"2644bbca-ee18-4f66-9e84-9360043c9883\"\n  }\n}"

  response = http.request(post)
  puts response.read_body
end
```

```java Realtime - Avatar (Anam)
import com.mashape.unirest.http.HttpResponse;
import com.mashape.unirest.http.Unirest;

// JSON payload describing a realtime agent with an Anam avatar attached.
String requestBody = "{\n  \"agent_name\": \"New MIA Agent\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-realtime-mini\",\n    \"system_prompt\": \"You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.\",\n    \"first_message\": \"Hey there! I am your AI assistant, ready to help with your meeting.\",\n    \"temperature\": 0.8,\n    \"voice\": \"coral\",\n    \"modalities\": [\n      \"text\",\n      \"audio\"\n    ],\n    \"max_response_output_tokens\": 200\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 0\n    },\n    \"false_interruption_timeout\": 2,\n    \"vad_type\": \"server_vad\",\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true,\n    \"tools_enabled\": false,\n    \"vad_threshold\": 0.5,\n    \"vad_prefix_padding_ms\": 0,\n    \"vad_silence_duration_ms\": 200\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  },\n  \"Avatar\": {\n    \"provider\": \"anam\",\n    \"enabled\": true,\n    \"avatar_id\": \"2644bbca-ee18-4f66-9e84-9360043c9883\"\n  }\n}";

// Replace <apiKey> with "Token <your_api_key>".
HttpResponse<String> response = Unirest.post("https://api.meetstream.ai/api/v1/mia")
  .header("Authorization", "<apiKey>")
  .header("Content-Type", "application/json")
  .body(requestBody)
  .asString();
```

```php Realtime - Avatar (Anam)
<?php
require_once('vendor/autoload.php');

$http = new \GuzzleHttp\Client();

// JSON payload describing a realtime agent with an Anam avatar attached.
$body = '{
  "agent_name": "New MIA Agent",
  "mode": "realtime",
  "model": {
    "provider": "openai",
    "model": "gpt-realtime-mini",
    "system_prompt": "You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.",
    "first_message": "Hey there! I am your AI assistant, ready to help with your meeting.",
    "temperature": 0.8,
    "voice": "coral",
    "modalities": [
      "text",
      "audio"
    ],
    "max_response_output_tokens": 200
  },
  "agent": {
    "tools": [],
    "preemptive_generation": false,
    "user_away_timeout": 15,
    "interruptions": {
      "min_duration_seconds": 0.5,
      "word_threshold": 0
    },
    "false_interruption_timeout": 2,
    "vad_type": "server_vad",
    "enable_interruptions": true,
    "resume_false_interruption": true,
    "tools_enabled": false,
    "vad_threshold": 0.5,
    "vad_prefix_padding_ms": 0,
    "vad_silence_duration_ms": 200
  },
  "audio": {
    "sample_rate": 24000,
    "num_channels": 1
  },
  "Avatar": {
    "provider": "anam",
    "enabled": true,
    "avatar_id": "2644bbca-ee18-4f66-9e84-9360043c9883"
  }
}';

// Replace <apiKey> with "Token <your_api_key>".
$response = $http->request('POST', 'https://api.meetstream.ai/api/v1/mia', [
  'headers' => [
    'Authorization' => '<apiKey>',
    'Content-Type' => 'application/json',
  ],
  'body' => $body,
]);

echo $response->getBody();
```

```csharp Realtime - Avatar (Anam)
using RestSharp;

// JSON payload describing a realtime agent with an Anam avatar attached.
const string json = "{\n  \"agent_name\": \"New MIA Agent\",\n  \"mode\": \"realtime\",\n  \"model\": {\n    \"provider\": \"openai\",\n    \"model\": \"gpt-realtime-mini\",\n    \"system_prompt\": \"You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.\",\n    \"first_message\": \"Hey there! I am your AI assistant, ready to help with your meeting.\",\n    \"temperature\": 0.8,\n    \"voice\": \"coral\",\n    \"modalities\": [\n      \"text\",\n      \"audio\"\n    ],\n    \"max_response_output_tokens\": 200\n  },\n  \"agent\": {\n    \"tools\": [],\n    \"preemptive_generation\": false,\n    \"user_away_timeout\": 15,\n    \"interruptions\": {\n      \"min_duration_seconds\": 0.5,\n      \"word_threshold\": 0\n    },\n    \"false_interruption_timeout\": 2,\n    \"vad_type\": \"server_vad\",\n    \"enable_interruptions\": true,\n    \"resume_false_interruption\": true,\n    \"tools_enabled\": false,\n    \"vad_threshold\": 0.5,\n    \"vad_prefix_padding_ms\": 0,\n    \"vad_silence_duration_ms\": 200\n  },\n  \"audio\": {\n    \"sample_rate\": 24000,\n    \"num_channels\": 1\n  },\n  \"Avatar\": {\n    \"provider\": \"anam\",\n    \"enabled\": true,\n    \"avatar_id\": \"2644bbca-ee18-4f66-9e84-9360043c9883\"\n  }\n}";

var client = new RestClient("https://api.meetstream.ai/api/v1/mia");
var request = new RestRequest(Method.POST);
// Replace <apiKey> with "Token <your_api_key>".
request.AddHeader("Authorization", "<apiKey>");
request.AddHeader("Content-Type", "application/json");
request.AddParameter("application/json", json, ParameterType.RequestBody);
IRestResponse response = client.Execute(request);
```

```swift Realtime - Avatar (Anam)
import Foundation

// Headers for the MIA create-config request.
// Replace <apiKey> with "Token <your_api_key>".
let headers = [
  "Authorization": "<apiKey>",
  "Content-Type": "application/json"
]
// Realtime agent configuration with an Anam avatar attached.
let parameters = [
  "agent_name": "New MIA Agent",
  "mode": "realtime",
  "model": [
    "provider": "openai",
    "model": "gpt-realtime-mini",
    "system_prompt": "You are a helpful AI meeting assistant. Keep responses concise and natural. Listen actively and provide value to the conversation.",
    "first_message": "Hey there! I am your AI assistant, ready to help with your meeting.",
    "temperature": 0.8,
    "voice": "coral",
    "modalities": ["text", "audio"],
    "max_response_output_tokens": 200
  ],
  "agent": [
    "tools": [],
    "preemptive_generation": false,
    "user_away_timeout": 15,
    "interruptions": [
      "min_duration_seconds": 0.5,
      "word_threshold": 0
    ],
    "false_interruption_timeout": 2,
    "vad_type": "server_vad",
    "enable_interruptions": true,
    "resume_false_interruption": true,
    "tools_enabled": false,
    "vad_threshold": 0.5,
    "vad_prefix_padding_ms": 0,
    "vad_silence_duration_ms": 200
  ],
  "audio": [
    "sample_rate": 24000,
    "num_channels": 1
  ],
  "Avatar": [
    "provider": "anam",
    "enabled": true,
    "avatar_id": "2644bbca-ee18-4f66-9e84-9360043c9883"
  ]
] as [String : Any]

// JSONSerialization.data(withJSONObject:options:) is a throwing API, so the
// call requires `try` — the previous sample omitted it and would not compile.
let postData: Data
do {
  postData = try JSONSerialization.data(withJSONObject: parameters, options: [])
} catch {
  fatalError("Unable to encode request payload as JSON: \(error)")
}

var request = URLRequest(url: URL(string: "https://api.meetstream.ai/api/v1/mia")!,
                         cachePolicy: .useProtocolCachePolicy,
                         timeoutInterval: 10.0)
request.httpMethod = "POST"
request.allHTTPHeaderFields = headers
request.httpBody = postData

// POST the config; a 201 response carries the created agent config.
let dataTask = URLSession.shared.dataTask(with: request) { data, response, error in
  if let error = error {
    print(error)
  } else if let httpResponse = response as? HTTPURLResponse {
    print(httpResponse)
  }
}

dataTask.resume()
```