Compare commits


7 Commits

Author     SHA1         Message                                               Date
Eddy Kim   bcaea9916d   remove deprecated openai example                      2024-12-03 14:30:05 -08:00
Joby       0fa4f0385e   fix: incorrect home page url in header banner (#40)   2024-11-19 10:23:59 -08:00
Joby       6fca8b4d42   feat: upgrade the sdk to v2.0.8 (#39)                 2024-11-17 22:38:11 -08:00
Joby       431281d47c   feat: task mode (#30)                                 2024-10-22 16:58:38 -07:00
Joby       274a307e83   chore: update sdk version (#26)                       2024-09-30 18:24:59 -07:00
Joby       03ef24b031   chore: update sdk version (#24)                       2024-09-27 15:39:13 -07:00
Joby       21f6c6d468   feat: support knowledge base (#23)                    2024-09-25 19:53:26 -07:00
6 changed files with 10 additions and 43 deletions

.env

@@ -1,3 +1 @@
-HEYGEN_API_KEY=your Heygen API key
-OPENAI_API_KEY=your OpenAI API key
-NEXT_PUBLIC_OPENAI_API_KEY=your OpenAI API key
+HEYGEN_API_KEY=your Heygen API key
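
The trimmed `.env` reflects that the demo no longer talks to OpenAI directly; only the HeyGen key remains, and it is read server-side. A minimal sketch of how such a key is typically exchanged for a short-lived session token (the route path and the `streaming.create_token` endpoint are assumptions about the demo's conventions, not part of this diff):

```ts
// app/api/get-access-token/route.ts (hypothetical path, not shown in this diff)
export async function POST() {
  const apiKey = process.env.HEYGEN_API_KEY; // the one variable the trimmed .env keeps

  // Assumed HeyGen endpoint for minting short-lived streaming session tokens.
  const res = await fetch("https://api.heygen.com/v1/streaming.create_token", {
    method: "POST",
    headers: { "x-api-key": apiKey ?? "" },
  });
  const { data } = await res.json();

  // Only the short-lived token reaches the browser, never the raw API key.
  return new Response(data.token);
}
```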

README.md

@@ -41,24 +41,6 @@ After you see Monica appear on the screen, you can enter text into the input lab
 If you want to see a different Avatar or try a different voice, you can close the session, enter the new IDs, and then 'start' the session again. Please see below for information on where to retrieve different Avatar and voice IDs that you can use.
-### Connecting to OpenAI
-A common use case for an Interactive Avatar is to use it as the 'face' of an LLM that users can interact with. In this demo we have included functionality to showcase this by accepting user input via voice (using OpenAI's Whisper) and sending that input to an OpenAI LLM (using their Chat Completions endpoint).
-Both of these features of this demo require an OpenAI API Key. If you do not have a paid OpenAI account, you can learn more on their website: [https://openai.com/index/openai-api/]
-Without an OpenAI API Key, this functionality will not work, and the Interactive Avatar will only be able to repeat text input that you provide, rather than demonstrate being the 'face' of an LLM. Regardless, this demo is meant to demonstrate what kinds of apps and experiences you can build with our Interactive Avatar SDK, so you can code your own connection to a different LLM if you so choose.
-To add your OpenAI API Key, copy it into the `OPENAI_API_KEY` and `NEXT_PUBLIC_OPENAI_API_KEY` variables in the `.env` file.
-### How does the integration with OpenAI / ChatGPT work?
-In this demo, we call OpenAI's Chat Completions API to generate a response to user input. You can see the relevant code in components/InteractiveAvatar.tsx.
-In the initialMessages parameter, you can replace the content of the 'system' message with whatever 'knowledge base' or context you would like the GPT-4o model to use when replying to the user's input.
-You can explore this API and the different parameters and models available here: [https://platform.openai.com/docs/guides/text-generation/chat-completions-api]
 ### Which Avatars can I use with this project?
 By default, there are several Public Avatars (AKA Interactive Avatars) that can be used with Interactive Avatar. You can find the Avatar IDs for these Public Avatars by navigating to [app.heygen.com/interactive-avatar](https://app.heygen.com/interactive-avatar), clicking 'Select Avatar', and copying the avatar ID.

(deleted file: the deprecated OpenAI example route; filename not captured in this view)

@@ -1,16 +0,0 @@
-import { openai } from "@ai-sdk/openai";
-import { streamText } from "ai";
-
-// Allow streaming responses up to 30 seconds
-export const maxDuration = 30;
-
-export async function POST(req: Request) {
-  const { messages } = await req.json();
-
-  const result = await streamText({
-    model: openai("gpt-4-turbo"),
-    messages,
-  });
-
-  return result.toAIStreamResponse();
-}
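
This local Chat Completions route becomes redundant once the SDK itself can answer on the avatar's behalf via `knowledgeId` or a custom `knowledgeBase` (visible in the InteractiveAvatar.tsx hunk below). A minimal sketch of the replacement pattern, assuming the v2 `StreamingAvatar` constructor options and a `TaskType.TALK` enum member, neither of which appears in this diff:

```ts
// Sketch only: identifiers beyond those visible in this diff are assumptions
// about the @heygen/streaming-avatar v2 API.
import StreamingAvatar, { AvatarQuality, TaskType } from "@heygen/streaming-avatar";

const avatar = new StreamingAvatar({ token: "<session-token>" });

await avatar.createStartAvatar({
  quality: AvatarQuality.Low,
  avatarName: "<avatar-id>",
  // Inline prompt; alternatively pass knowledgeId for a saved knowledge base.
  knowledgeBase: "You are a helpful product concierge. Answer briefly.",
});

// TALK routes the text through the configured knowledge base / LLM,
// so no separate chat API route is needed anymore.
await avatar.speak({ text: "What can you help me with?", taskType: TaskType.TALK });
```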

components/InteractiveAvatar.tsx

@@ -2,7 +2,7 @@ import type { StartAvatarResponse } from "@heygen/streaming-avatar";
 import StreamingAvatar, {
   AvatarQuality,
-  StreamingEvents, TaskType, VoiceEmotion,
+  StreamingEvents, TaskMode, TaskType, VoiceEmotion,
 } from "@heygen/streaming-avatar";
 import {
   Button,
import {
Button,
@@ -91,17 +91,20 @@ export default function InteractiveAvatar() {
       const res = await avatar.current.createStartAvatar({
         quality: AvatarQuality.Low,
         avatarName: avatarId,
-        knowledgeId: knowledgeId,
+        knowledgeId: knowledgeId, // Or use a custom `knowledgeBase`.
         voice: {
           rate: 1.5, // 0.5 ~ 1.5
           emotion: VoiceEmotion.EXCITED,
         },
         language: language,
+        disableIdleTimeout: true,
       });
       setData(res);
       // default to voice mode
-      await avatar.current?.startVoiceChat();
+      await avatar.current?.startVoiceChat({
+        useSilencePrompt: false
+      });
       setChatMode("voice_mode");
     } catch (error) {
       console.error("Error starting avatar session:", error);
@@ -117,7 +120,7 @@ export default function InteractiveAvatar() {
       return;
     }
     // speak({ text: text, task_type: TaskType.REPEAT })
-    await avatar.current.speak({ text: text }).catch((e) => {
+    await avatar.current.speak({ text: text, taskType: TaskType.REPEAT, taskMode: TaskMode.SYNC }).catch((e) => {
       setDebug(e.message);
     });
     setIsLoadingRepeat(false);
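
For context on the two parameters this hunk pins down: `TaskType` chooses whether the avatar repeats the text verbatim or generates a reply to it, and `TaskMode` chooses whether `speak` resolves when playback finishes or as soon as the task is queued. A hedged sketch; the `TALK` and `ASYNC` members are assumptions from the SDK's typings, since only `REPEAT` and `SYNC` appear in this diff:

```ts
import { TaskMode, TaskType } from "@heygen/streaming-avatar";

// REPEAT = say the text verbatim; TALK = generate a reply to it (assumed member).
// SYNC = resolve after the avatar finishes speaking; ASYNC = resolve on enqueue (assumed member).
await avatar.current.speak({
  text: "Hello!",
  taskType: TaskType.REPEAT, // verbatim playback, as this demo's text mode uses
  taskMode: TaskMode.SYNC,   // block until the utterance completes
});
```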

components/NavBar.tsx

@@ -28,7 +28,7 @@ export default function NavBar() {
         <Link
           isExternal
           color="foreground"
-          href="https://app.heygen.com/interactive-avatar"
+          href="https://labs.heygen.com/interactive-avatar"
         >
           Avatars
         </Link>

package.json

@@ -10,7 +10,7 @@
   },
   "dependencies": {
     "@ai-sdk/openai": "^0.0.34",
-    "@heygen/streaming-avatar": "^2.0.2",
+    "@heygen/streaming-avatar": "^2.0.8",
     "@nextui-org/button": "2.0.34",
     "@nextui-org/chip": "^2.0.32",
     "@nextui-org/code": "2.0.29",