Compare commits

...

5 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| raojianb | 64bb29760e | feat: simplify api | 2024-09-22 01:53:18 -07:00 |
| Joby | befb6228f5 | feat: voice chat demo (#13) | 2024-09-20 21:38:26 -07:00 |
| James Zow | 2454a4729d | Update README.md (#8) | 2024-09-06 21:00:48 -07:00 |
| Joby | 935b10279b | Feat/livekit (#9); commit body: "feat: using version 2.0 skd" (repeated three times) | 2024-09-06 20:59:55 -07:00 |
| jobyrao-heygen | 052c2b3ad1 | Merge pull request #6 from HeyGen-Official/fix/text-const-error; commit body: "fix: text constant error" | 2024-07-25 16:26:44 -07:00 |
6 changed files with 162 additions and 407 deletions

File: README.md

```diff
@@ -15,7 +15,7 @@ Feel free to play around with the existing code and please leave any feedback fo
 3. Run `npm install` (assuming you have npm installed. If not, please follow these instructions: https://docs.npmjs.com/downloading-and-installing-node-js-and-npm/)
-4. Enter your HeyGen Enterprise API Token or Trial Token in the `.env` file. Replace `PLACEHOLDER-API-KEY` with your API key. This will allow the Client app to generate secure Access Tokens with which to create interactive sessions.
+4. Enter your HeyGen Enterprise API Token or Trial Token in the `.env` file. Replace `HEYGEN_API_KEY` with your API key. This will allow the Client app to generate secure Access Tokens with which to create interactive sessions.
 You can retrieve either the API Key or Trial Token by logging in to HeyGen and navigating to this page in your settings: [https://app.heygen.com/settings?nav=API]. NOTE: use the trial token if you don't have an enterprise API token yet.
@@ -67,14 +67,6 @@ In order to use a private Avatar created under your own account in Interactive A
 Please note that Photo Avatars are not compatible with Interactive Avatar and cannot be used.
-### Which voices can I use with my Interactive Avatar?
-Most of HeyGen's AI Voices can be used with the Interactive Avatar API. To find the Voice IDs that you can use, please use the List Voices v2 endpoint from HeyGen: [https://docs.heygen.com/reference/list-voices-v2]
-Please note that for voices that support Emotions, such as Christine and Tarquin, you need to pass in the Emotion string in the Voice Setting parameter: [https://docs.heygen.com/reference/new-session-copy#voicesetting]
-You can also set the speed at which the Interactive Avatar speaks by passing in a Rate in the Voice Setting.
 ### Where can I read more about enterprise-level usage of the Interactive Avatar API?
 Please read our Interactive Avatar 101 article for more information on pricing and how to increase your concurrent session limit: https://help.heygen.com/en/articles/9182113-interactive-avatar-101-your-ultimate-guide
```
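The FAQ entry removed above explains that emotion-capable voices take an Emotion string and that speaking speed is set via a Rate, both inside the Voice Setting. As a hedged illustration only, a session request using those fields might look like the sketch below; the field names follow the linked VoiceSetting reference and may differ in the current API.

```ts
// Illustrative sketch only: a new-session request using the VoiceSetting
// fields described in the removed FAQ text. Field names are taken from the
// linked docs (https://docs.heygen.com/reference/new-session-copy#voicesetting)
// and should be verified against the current API reference.
const sessionRequest = {
  quality: "low",
  avatar_name: "<your-interactive-avatar-id>",
  voice: {
    voice_id: "131a436c47064f708210df6628ef8f32", // look up IDs via the List Voices v2 endpoint
    rate: 1.1, // speaking speed; 1.0 is the default
    emotion: "excited", // only honored by emotion-capable voices (e.g. Christine, Tarquin)
  },
};
```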

File: app/api/get-access-token/route.ts (path inferred; filenames were not captured)

```diff
@@ -13,7 +13,7 @@ export async function POST() {
       headers: {
         "x-api-key": HEYGEN_API_KEY,
       },
-    }
+    },
   );
   const data = await res.json();
```
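This hunk only adds a trailing comma, but the surrounding route is the piece that turns the server-side `HEYGEN_API_KEY` into the short-lived access token the client uses. A sketch of the whole handler follows; only the fetch options visible in the hunk are confirmed by the diff, while the endpoint URL and the `data.data.token` response shape are assumptions.

```ts
// Sketch of the access-token route handler this hunk patches. The endpoint
// URL and response shape are assumptions; the headers block matches the diff.
const HEYGEN_API_KEY = process.env.HEYGEN_API_KEY ?? "";

export async function POST() {
  try {
    const res = await fetch(
      "https://api.heygen.com/v1/streaming.create_token", // assumed endpoint
      {
        method: "POST",
        headers: {
          "x-api-key": HEYGEN_API_KEY,
        },
      },
    );
    const data = await res.json();

    return new Response(data.data.token, { status: 200 }); // assumed shape
  } catch (error) {
    console.error("Error retrieving access token:", error);

    return new Response("Failed to retrieve access token", { status: 500 });
  }
}
```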

File: app/lib/constants.ts

```diff
@@ -20,116 +20,3 @@ export const AVATARS = [
     name: "Joshua Heygen CEO",
   },
 ];
-
-export const VOICES = [
-  {
-    voice_id: "077ab11b14f04ce0b49b5f6e5cc20979",
-    language: "English",
-    gender: "Male",
-    name: "Paul - Natural",
-    preview_audio:
-      "https://static.heygen.ai/voice_preview/k6dKrFe85PisZ3FMLeppUM.mp3",
-    support_pause: true,
-    emotion_support: false,
-  },
-  {
-    voice_id: "131a436c47064f708210df6628ef8f32",
-    language: "English",
-    gender: "Female",
-    name: "Amber - Friendly",
-    preview_audio:
-      "https://static.heygen.ai/voice_preview/5HHGT48B6g6aSg2buYcBvw.wav",
-    support_pause: true,
-    emotion_support: false,
-  },
-  {
-    voice_id: "0ebe70d83b2349529e56492c002c9572",
-    language: "English",
-    gender: "Male",
-    name: "Antoni - Friendly",
-    preview_audio:
-      "https://static.heygen.ai/voice_preview/TwupgZ2az5RiTnmAifPmmS.mp3",
-    support_pause: true,
-    emotion_support: false,
-  },
-  {
-    voice_id: "1bd001e7e50f421d891986aad5158bc8",
-    language: "English",
-    gender: "Female",
-    name: "Sara - Cheerful",
-    preview_audio:
-      "https://static.heygen.ai/voice_preview/func8CFnfVLKF2VzGDCDCR.wav",
-    support_pause: true,
-    emotion_support: false,
-  },
-  {
-    voice_id: "001cc6d54eae4ca2b5fb16ca8e8eb9bb",
-    language: "Spanish",
-    gender: "Male",
-    name: "Elias - Natural",
-    preview_audio:
-      "https://static.heygen.ai/voice_preview/JmCb3rgMZnCjCAA9aacnGj.wav",
-    support_pause: false,
-    emotion_support: false,
-  },
-  {
-    voice_id: "00988b7d451d0722635ff7b2b9540a7b",
-    language: "Portuguese",
-    gender: "Female",
-    name: "Brenda - Professional",
-    preview_audio:
-      "https://static.heygen.ai/voice_preview/fec6396adb73461c9997b2c0d7759b7b.wav",
-    support_pause: true,
-    emotion_support: false,
-  },
-  {
-    voice_id: "00c8fd447ad7480ab1785825978a2215",
-    language: "Chinese",
-    gender: "Female",
-    name: "Xiaoxuan - Serious",
-    preview_audio:
-      "https://static.heygen.ai/voice_preview/909633f8d34e408a9aaa4e1b60586865.wav",
-    support_pause: true,
-    emotion_support: false,
-  },
-  {
-    voice_id: "00ed77fac8b84ffcb2ab52739b9dccd3",
-    language: "Latvian",
-    gender: "Male",
-    name: "Nils - Affinity",
-    preview_audio:
-      "https://static.heygen.ai/voice_preview/KwTwAz3R4aBFN69fEYQFdX.wav",
-    support_pause: true,
-    emotion_support: false,
-  },
-  {
-    voice_id: "02bec3b4cb514722a84e4e18d596fddf",
-    language: "Arabic",
-    gender: "Female",
-    name: "Fatima - Professional",
-    preview_audio:
-      "https://static.heygen.ai/voice_preview/930a245487fe42158c810ac76b8ddbab.wav",
-    support_pause: true,
-    emotion_support: false,
-  },
-  {
-    voice_id: "04e95f5bcb8b4620a2c4ef45b8a4481a",
-    language: "Ukrainian",
-    gender: "Female",
-    name: "Polina - Professional",
-    preview_audio:
-      "https://static.heygen.ai/voice_preview/ntekV94yFpvv4RgBVPqW7c.wav",
-    support_pause: true,
-    emotion_support: false,
-  },
-  {
-    voice_id: "071d6bea6a7f455b82b6364dab9104a2",
-    language: "German",
-    gender: "Male",
-    name: "Jan - Natural",
-    preview_audio:
-      "https://static.heygen.ai/voice_preview/fa3728bed81a4d11b8ccef10506af5f4.wav",
-    support_pause: true,
-    emotion_support: false,
-  },
-];
```

File: app/page.tsx (path inferred; filenames were not captured)

```diff
@@ -1,34 +1,13 @@
 "use client";
 
 import InteractiveAvatar from "@/components/InteractiveAvatar";
-import InteractiveAvatarCode from "@/components/InteractiveAvatarCode";
-import { Tab, Tabs } from "@nextui-org/react";
 
 export default function App() {
-  const tabs = [
-    {
-      id: "demo",
-      label: "Demo",
-      content: <InteractiveAvatar />,
-    },
-    {
-      id: "code",
-      label: "Code",
-      content: <InteractiveAvatarCode />,
-    },
-  ];
-
   return (
     <div className="w-screen h-screen flex flex-col">
       <div className="w-[900px] flex flex-col items-start justify-start gap-5 mx-auto pt-4 pb-20">
         <div className="w-full">
-          <Tabs items={tabs}>
-            {(items) => (
-              <Tab key={items.id} title={items.label}>
-                {items.content}
-              </Tab>
-            )}
-          </Tabs>
+          <InteractiveAvatar />
         </div>
       </div>
     </div>
```

File: components/InteractiveAvatar.tsx

```diff
@@ -1,8 +1,8 @@
-import { AVATARS, VOICES } from "@/app/lib/constants";
-import {
-  Configuration,
-  NewSessionData,
-  StreamingAvatarApi,
+import type { StartAvatarResponse } from "@heygen/streaming-avatar";
+
+import StreamingAvatar, {
+  AvatarQuality,
+  StreamingEvents,
 } from "@heygen/streaming-avatar";
 import {
   Button,
```
```diff
@@ -14,63 +14,30 @@ import {
   Select,
   SelectItem,
   Spinner,
-  Tooltip,
+  Chip,
+  Tabs,
+  Tab,
 } from "@nextui-org/react";
-import { Microphone, MicrophoneStage } from "@phosphor-icons/react";
-import { useChat } from "ai/react";
-import clsx from "clsx";
-import OpenAI from "openai";
 import { useEffect, useRef, useState } from "react";
+import { useMemoizedFn, usePrevious } from "ahooks";
 
 import InteractiveAvatarTextInput from "./InteractiveAvatarTextInput";
 
-const openai = new OpenAI({
-  apiKey: process.env.NEXT_PUBLIC_OPENAI_API_KEY,
-  dangerouslyAllowBrowser: true,
-});
+import { AVATARS } from "@/app/lib/constants";
 
 export default function InteractiveAvatar() {
   const [isLoadingSession, setIsLoadingSession] = useState(false);
   const [isLoadingRepeat, setIsLoadingRepeat] = useState(false);
-  const [isLoadingChat, setIsLoadingChat] = useState(false);
   const [stream, setStream] = useState<MediaStream>();
   const [debug, setDebug] = useState<string>();
+  const [knowledgeId, setKnowledgeId] = useState<string>("");
   const [avatarId, setAvatarId] = useState<string>("");
-  const [voiceId, setVoiceId] = useState<string>("");
-  const [data, setData] = useState<NewSessionData>();
+  const [data, setData] = useState<StartAvatarResponse>();
   const [text, setText] = useState<string>("");
-  const [initialized, setInitialized] = useState(false); // Track initialization
-  const [recording, setRecording] = useState(false); // Track recording state
   const mediaStream = useRef<HTMLVideoElement>(null);
-  const avatar = useRef<StreamingAvatarApi | null>(null);
-  const mediaRecorder = useRef<MediaRecorder | null>(null);
-  const audioChunks = useRef<Blob[]>([]);
-
-  const { input, setInput, handleSubmit } = useChat({
-    onFinish: async (message) => {
-      console.log("ChatGPT Response:", message);
-      if (!initialized || !avatar.current) {
-        setDebug("Avatar API not initialized");
-        return;
-      }
-      //send the ChatGPT response to the Interactive Avatar
-      await avatar.current
-        .speak({
-          taskRequest: { text: message.content, sessionId: data?.sessionId },
-        })
-        .catch((e) => {
-          setDebug(e.message);
-        });
-      setIsLoadingChat(false);
-    },
-    initialMessages: [
-      {
-        id: "1",
-        role: "system",
-        content: "You are a helpful assistant.",
-      },
-    ],
-  });
+  const avatar = useRef<StreamingAvatar | null>(null);
+  const [chatMode, setChatMode] = useState("text_mode");
+  const [isUserTalking, setIsUserTalking] = useState(false);
 
   async function fetchAccessToken() {
     try {
```
```diff
@@ -78,114 +45,121 @@ export default function InteractiveAvatar() {
         method: "POST",
       });
       const token = await response.text();
 
       console.log("Access Token:", token); // Log the token to verify
 
       return token;
     } catch (error) {
       console.error("Error fetching access token:", error);
-      return "";
     }
+
+    return "";
   }
 
   async function startSession() {
     setIsLoadingSession(true);
-    await updateToken();
-    if (!avatar.current) {
-      setDebug("Avatar API is not initialized");
-      return;
-    }
+    const newToken = await fetchAccessToken();
+
+    avatar.current = new StreamingAvatar({
+      token: newToken,
+    });
+    avatar.current.on(StreamingEvents.AVATAR_START_TALKING, (e) => {
+      console.log("Avatar started talking", e);
+    });
+    avatar.current.on(StreamingEvents.AVATAR_STOP_TALKING, (e) => {
+      console.log("Avatar stopped talking", e);
+    });
+    avatar.current.on(StreamingEvents.STREAM_DISCONNECTED, () => {
+      console.log("Stream disconnected");
+      endSession();
+    });
+    avatar.current?.on(StreamingEvents.STREAM_READY, (event) => {
+      console.log(">>>>> Stream ready:", event.detail);
+      setStream(event.detail);
+    });
+    avatar.current?.on(StreamingEvents.USER_START, (event) => {
+      console.log(">>>>> User started talking:", event);
+      setIsUserTalking(true);
+    });
+    avatar.current?.on(StreamingEvents.USER_STOP, (event) => {
+      console.log(">>>>> User stopped talking:", event);
+      setIsUserTalking(false);
+    });
     try {
-      const res = await avatar.current.createStartAvatar(
-        {
-          newSessionRequest: {
-            quality: "low",
-            avatarName: avatarId,
-            voice: { voiceId: voiceId },
-          },
-        },
-        setDebug
-      );
+      const res = await avatar.current.createStartAvatar({
+        quality: AvatarQuality.Low,
+        avatarName: avatarId,
+        knowledgeId: knowledgeId,
+      });
+
       setData(res);
-      setStream(avatar.current.mediaStream);
+      // default to voice mode
+      await avatar.current?.startVoiceChat();
+      setChatMode("voice_mode");
     } catch (error) {
       console.error("Error starting avatar session:", error);
-      setDebug(
-        `There was an error starting the session. ${voiceId ? "This custom voice ID may not be supported." : ""}`
-      );
+    } finally {
+      setIsLoadingSession(false);
     }
-    setIsLoadingSession(false);
   }
-
-  async function updateToken() {
-    const newToken = await fetchAccessToken();
-    console.log("Updating Access Token:", newToken); // Log token for debugging
-    avatar.current = new StreamingAvatarApi(
-      new Configuration({ accessToken: newToken })
-    );
-
-    const startTalkCallback = (e: any) => {
-      console.log("Avatar started talking", e);
-    };
-    const stopTalkCallback = (e: any) => {
-      console.log("Avatar stopped talking", e);
-    };
-
-    console.log("Adding event handlers:", avatar.current);
-    avatar.current.addEventHandler("avatar_start_talking", startTalkCallback);
-    avatar.current.addEventHandler("avatar_stop_talking", stopTalkCallback);
-
-    setInitialized(true);
-  }
-
-  async function handleInterrupt() {
-    if (!initialized || !avatar.current) {
-      setDebug("Avatar API not initialized");
-      return;
-    }
-    await avatar.current
-      .interrupt({ interruptRequest: { sessionId: data?.sessionId } })
-      .catch((e) => {
-        setDebug(e.message);
-      });
-  }
-
-  async function endSession() {
-    if (!initialized || !avatar.current) {
-      setDebug("Avatar API not initialized");
-      return;
-    }
-    await avatar.current.stopAvatar(
-      { stopSessionRequest: { sessionId: data?.sessionId } },
-      setDebug
-    );
-    setStream(undefined);
-  }
 
   async function handleSpeak() {
     setIsLoadingRepeat(true);
-    if (!initialized || !avatar.current) {
+    if (!avatar.current) {
       setDebug("Avatar API not initialized");
       return;
     }
     await avatar.current
-      .speak({ taskRequest: { text: text, sessionId: data?.sessionId } })
+      .speak({ text: text })
       .catch((e) => {
         setDebug(e.message);
       });
     setIsLoadingRepeat(false);
   }
+
+  async function handleInterrupt() {
+    if (!avatar.current) {
+      setDebug("Avatar API not initialized");
+      return;
+    }
+    await avatar.current
+      .interrupt()
+      .catch((e) => {
+        setDebug(e.message);
+      });
+  }
+
+  async function endSession() {
+    if (!avatar.current) {
+      setDebug("Avatar API not initialized");
+      return;
+    }
+    await avatar.current.stopAvatar();
+    setStream(undefined);
+  }
+
+  const handleChangeChatMode = useMemoizedFn(async (v) => {
+    if (v === chatMode) {
+      return;
+    }
+    if (v === "text_mode") {
+      avatar.current?.closeVoiceChat();
+    } else {
+      await avatar.current?.startVoiceChat();
+    }
+    setChatMode(v);
+  });
+
+  const previousText = usePrevious(text);
+
+  useEffect(() => {
+    if (!previousText && text) {
+      avatar.current?.startListening();
+    } else if (previousText && !text) {
+      avatar?.current?.stopListening();
+    }
+  }, [text, previousText]);
 
   useEffect(() => {
-    async function init() {
-      const newToken = await fetchAccessToken();
-      console.log("Initializing with Access Token:", newToken); // Log token for debugging
-      avatar.current = new StreamingAvatarApi(
-        new Configuration({ accessToken: newToken, jitterBuffer: 200 })
-      );
-      setInitialized(true); // Set initialized to true
-    }
-    init();
-
     return () => {
       endSession();
     };
```
```diff
@@ -201,54 +175,6 @@
     }
   }, [mediaStream, stream]);
 
-  function startRecording() {
-    navigator.mediaDevices
-      .getUserMedia({ audio: true })
-      .then((stream) => {
-        mediaRecorder.current = new MediaRecorder(stream);
-        mediaRecorder.current.ondataavailable = (event) => {
-          audioChunks.current.push(event.data);
-        };
-        mediaRecorder.current.onstop = () => {
-          const audioBlob = new Blob(audioChunks.current, {
-            type: "audio/wav",
-          });
-          audioChunks.current = [];
-          transcribeAudio(audioBlob);
-        };
-        mediaRecorder.current.start();
-        setRecording(true);
-      })
-      .catch((error) => {
-        console.error("Error accessing microphone:", error);
-      });
-  }
-
-  function stopRecording() {
-    if (mediaRecorder.current) {
-      mediaRecorder.current.stop();
-      setRecording(false);
-    }
-  }
-
-  async function transcribeAudio(audioBlob: Blob) {
-    try {
-      // Convert Blob to File
-      const audioFile = new File([audioBlob], "recording.wav", {
-        type: "audio/wav",
-      });
-      const response = await openai.audio.transcriptions.create({
-        model: "whisper-1",
-        file: audioFile,
-      });
-      const transcription = response.text;
-      console.log("Transcription: ", transcription);
-      setInput(transcription);
-    } catch (error) {
-      console.error("Error transcribing audio:", error);
-    }
-  }
-
   return (
     <div className="w-full flex flex-col gap-4">
       <Card>
```
```diff
@@ -269,18 +195,18 @@
             </video>
             <div className="flex flex-col gap-2 absolute bottom-3 right-3">
               <Button
-                size="md"
-                onClick={handleInterrupt}
                 className="bg-gradient-to-tr from-indigo-500 to-indigo-300 text-white rounded-lg"
+                size="md"
                 variant="shadow"
+                onClick={handleInterrupt}
               >
                 Interrupt task
               </Button>
               <Button
-                size="md"
-                onClick={endSession}
                 className="bg-gradient-to-tr from-indigo-500 to-indigo-300 text-white rounded-lg"
+                size="md"
                 variant="shadow"
+                onClick={endSession}
               >
                 End session
               </Button>
```
```diff
@@ -289,13 +215,21 @@
         ) : !isLoadingSession ? (
           <div className="h-full justify-center items-center flex flex-col gap-8 w-[500px] self-center">
             <div className="flex flex-col gap-2 w-full">
+              <p className="text-sm font-medium leading-none">
+                Custom Knowledge ID (optional)
+              </p>
+              <Input
+                placeholder="Enter a custom knowledge ID"
+                value={knowledgeId}
+                onChange={(e) => setKnowledgeId(e.target.value)}
+              />
               <p className="text-sm font-medium leading-none">
                 Custom Avatar ID (optional)
               </p>
               <Input
+                placeholder="Enter a custom avatar ID"
                 value={avatarId}
                 onChange={(e) => setAvatarId(e.target.value)}
-                placeholder="Enter a custom avatar ID"
               />
               <Select
                 placeholder="Or select one from these example avatars"
```
```diff
@@ -314,97 +248,58 @@
                 ))}
               </Select>
             </div>
-            <div className="flex flex-col gap-2 w-full">
-              <p className="text-sm font-medium leading-none">
-                Custom Voice ID (optional)
-              </p>
-              <Input
-                value={voiceId}
-                onChange={(e) => setVoiceId(e.target.value)}
-                placeholder="Enter a custom voice ID"
-              />
-              <Select
-                placeholder="Or select one from these example voices"
-                size="md"
-                onChange={(e) => {
-                  setVoiceId(e.target.value);
-                }}
-              >
-                {VOICES.map((voice) => (
-                  <SelectItem key={voice.voice_id} textValue={voice.voice_id}>
-                    {voice.name} | {voice.language} | {voice.gender}
-                  </SelectItem>
-                ))}
-              </Select>
-            </div>
             <Button
-              size="md"
-              onClick={startSession}
               className="bg-gradient-to-tr from-indigo-500 to-indigo-300 w-full text-white"
+              size="md"
               variant="shadow"
+              onClick={startSession}
             >
               Start session
             </Button>
           </div>
         ) : (
-          <Spinner size="lg" color="default" />
+          <Spinner color="default" size="lg" />
         )}
       </CardBody>
       <Divider />
-      <CardFooter className="flex flex-col gap-3">
-        <InteractiveAvatarTextInput
-          label="Repeat"
-          placeholder="Type something for the avatar to repeat"
-          input={text}
-          onSubmit={handleSpeak}
-          setInput={setText}
-          disabled={!stream}
-          loading={isLoadingRepeat}
-        />
-        <InteractiveAvatarTextInput
-          label="Chat"
-          placeholder="Chat with the avatar (uses ChatGPT)"
-          input={input}
-          onSubmit={() => {
-            setIsLoadingChat(true);
-            if (!input) {
-              setDebug("Please enter text to send to ChatGPT");
-              return;
-            }
-            handleSubmit();
-          }}
-          setInput={setInput}
-          loading={isLoadingChat}
-          endContent={
-            <Tooltip
-              content={!recording ? "Start recording" : "Stop recording"}
-            >
-              <Button
-                onClick={!recording ? startRecording : stopRecording}
-                isDisabled={!stream}
-                isIconOnly
-                className={clsx(
-                  "mr-4 text-white",
-                  !recording
-                    ? "bg-gradient-to-tr from-indigo-500 to-indigo-300"
-                    : ""
-                )}
-                size="sm"
-                variant="shadow"
-              >
-                {!recording ? (
-                  <Microphone size={20} />
-                ) : (
-                  <>
-                    <div className="absolute h-full w-full bg-gradient-to-tr from-indigo-500 to-indigo-300 animate-pulse -z-10"></div>
-                    <MicrophoneStage size={20} />
-                  </>
-                )}
-              </Button>
-            </Tooltip>
-          }
-          disabled={!stream}
-        />
+      <CardFooter className="flex flex-col gap-3 relative">
+        <Tabs
+          aria-label="Options"
+          selectedKey={chatMode}
+          onSelectionChange={(v) => {
+            handleChangeChatMode(v);
+          }}
+        >
+          <Tab key="text_mode" title="Text mode" />
+          <Tab key="voice_mode" title="Voice mode" />
+        </Tabs>
+        {chatMode === "text_mode" ? (
+          <div className="w-full flex relative">
+            <InteractiveAvatarTextInput
+              disabled={!stream}
+              input={text}
+              label="Chat"
+              loading={isLoadingRepeat}
+              placeholder="Type something for the avatar to respond"
+              setInput={setText}
+              onSubmit={handleSpeak}
+            />
+            {text && (
+              <Chip className="absolute right-16 top-3">Listening</Chip>
+            )}
+          </div>
+        ) : (
+          <div className="w-full text-center">
+            <Button
+              isDisabled={!isUserTalking}
+              className="bg-gradient-to-tr from-indigo-500 to-indigo-300 text-white"
+              size="md"
+              variant="shadow"
+            >
+              {isUserTalking ? "Listening" : "Voice chat"}
+            </Button>
+          </div>
+        )}
       </CardFooter>
     </Card>
     <p className="font-mono text-right">
```
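Taken together, the changes in this file are the v1 to v2 SDK migration: the `Configuration`/`StreamingAvatarApi` pair, per-call `sessionId` plumbing, and the hand-rolled MediaRecorder + Whisper transcription path are replaced by a single `StreamingAvatar` client with a typed `StreamingEvents` enum and built-in voice chat. The sketch below condenses the new lifecycle using only calls that appear in the diff; the `/api/get-access-token` URL is an assumption, since the fetch target sits outside the captured hunk context.

```ts
// Condensed v2 session lifecycle, assembled from calls visible in the diff.
// The token route URL is an assumption (it is outside the shown hunks).
import StreamingAvatar, {
  AvatarQuality,
  StreamingEvents,
} from "@heygen/streaming-avatar";

async function runSession(avatarName: string, knowledgeId: string) {
  // 1. Fetch a short-lived access token from the demo's server route.
  const response = await fetch("/api/get-access-token", { method: "POST" });
  const token = await response.text();

  // 2. v2 takes the token directly; no Configuration wrapper object.
  const avatar = new StreamingAvatar({ token });

  // 3. Typed events replace v1's addEventHandler("avatar_start_talking", ...).
  avatar.on(StreamingEvents.STREAM_READY, (event) => {
    console.log("Stream ready:", event.detail); // detail carries the MediaStream
  });
  avatar.on(StreamingEvents.STREAM_DISCONNECTED, () => {
    void avatar.stopAvatar();
  });

  // 4. One flat request object; no nested newSessionRequest wrapper.
  await avatar.createStartAvatar({
    quality: AvatarQuality.Low,
    avatarName,
    knowledgeId,
  });

  // 5. Voice chat is built in, and task calls no longer pass a sessionId.
  await avatar.startVoiceChat();
  await avatar.speak({ text: "Hello!" });
  await avatar.interrupt();
  await avatar.stopAvatar();
}
```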

File: package.json

```diff
@@ -3,15 +3,16 @@
   "version": "0.0.1",
   "private": true,
   "scripts": {
-    "dev": "next dev --turbo",
+    "dev": "node_modules/next/dist/bin/next dev",
     "build": "next build",
     "start": "next start",
     "lint": "eslint . --ext .ts,.tsx -c .eslintrc.json --fix"
   },
   "dependencies": {
     "@ai-sdk/openai": "^0.0.34",
-    "@heygen/streaming-avatar": "^1.0.11",
+    "@heygen/streaming-avatar": "^2.0.0-beta.1",
     "@nextui-org/button": "2.0.34",
+    "@nextui-org/chip": "^2.0.32",
     "@nextui-org/code": "2.0.29",
     "@nextui-org/input": "2.2.2",
     "@nextui-org/kbd": "2.0.30",
@@ -28,6 +29,7 @@
     "@react-aria/visually-hidden": "3.8.12",
     "@uiw/codemirror-extensions-langs": "^4.22.1",
     "@uiw/react-codemirror": "^4.22.1",
+    "ahooks": "^3.8.1",
     "ai": "^3.2.15",
     "clsx": "2.1.1",
     "framer-motion": "~11.1.1",
```