import React, { useState, useCallback } from 'react';
import { AIChatDialogue, AIChatInput, chatInputToMessage, Typography, Button } from '@douyinfe/semi-ui';
import { IconFeishuLogo, IconBookOpenStroked, IconGit, IconFigma, IconWord, IconClose, IconTemplateStroked, IconSearch } from '@douyinfe/semi-icons';
const { Configure } = AIChatInput;
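// Recursive structural equality check, used to decide whether the sidebar has been
// asked to show the same content again (toggle) or new content (open).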
const simpleIsEqual = (a, b) => {
if (a === b) {
return true;
}
if (Number.isNaN(a) && Number.isNaN(b)) {
return true;
}
if (typeof a !== 'object' || a === null || typeof b !== 'object' || b === null) {
return false;
}
const isArrayA = Array.isArray(a);
const isArrayB = Array.isArray(b);
if (isArrayA !== isArrayB) {
return false;
}
const keysA = Object.keys(a);
const keysB = Object.keys(b);
if (keysA.length !== keysB.length) {
return false;
}
for (const key of keysA) {
if (!Object.prototype.hasOwnProperty.call(b, key)) {
return false;
}
if (!simpleIsEqual(a[key], b[key])) {
return false;
}
}
return true;
};
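// Demo: AIChatInput wired to an AIChatDialogue, covering message sending, in-place
// message editing, references, custom content rendering and a detail sidebar.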
function AIChatInputWithDialogue() {
const inputOuterStyle = { margin: '12px', minHeight: 150, maxHeight: 300, flexShrink: 0 };
const editingInputOuterStyle = { margin: '12px 0px', maxHeight: 300, flexShrink: 0 };
const dialogueOuterStyle = { flex: 1, overflow: 'auto' };
const [sideBarVisible, setSideBarVisible] = useState(false);
const [messages, setMessages] = useState(defaultMessages);
const [generating, setGenerating] = useState(false);
const [references, setReferences] = useState([]);
const [sideBarContent, setSideBarContent] = useState({});
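    // Configure area on the left of the input: model selector, online-search toggle,
    // MCP tool list and a thinking-mode radio group.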
const renderLeftMenu = useCallback(() => (<>
<Configure.Select optionList={modelOptions} field="model" initValue="GPT-4o" />
<Configure.Button icon={<IconBookOpenStroked />} field="onlineSearch">Online search</Configure.Button>
<Configure.Mcp options={mcpOptions} />
<Configure.RadioButton options={radioButtonProps} field="thinkType" initValue="think"/>
</>), []);
const onChatsChange = useCallback((chats) => {
console.log('onChatsChange', chats);
setMessages(chats);
}, []);
const onContentChange = useCallback((content) => {
// console.log('onContentChange', content);
}, []);
const onReferenceClick = useCallback((item) => {
setReferences((references) => [...references, { ...item, id: `reference-${Date.now()}` }]);
}, []);
const handleReferenceDelete = useCallback((item) => {
const newReference = references.filter((ref) => ref.id !== item.id);
setReferences(newReference);
}, [references]);
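    // Append the sent message, then mock an assistant reply after a short delay.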
const onMessageSend = useCallback((props) => {
setGenerating(true);
        // Simulate sending the request
setMessages((messages) => [...messages, {
id: `message-${Date.now()}`,
...chatInputToMessage(props),
}]);
setReferences([]);
setTimeout(() => {
setGenerating(false);
}, 100);
setTimeout(() => {
            // Simulate the API response
setMessages((messages) => {
return [...messages, {
id: `message-${Date.now()}`,
role: 'assistant',
name: 'FE',
content: "This is a mock reply message.",
}];
});
}, 1000);
}, []);
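    // Re-send an edited message: keep the history before the edited message, append the
    // newly submitted content, and drop everything after it.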
const onEditMessageSend = useCallback((props) => {
const index = messages.findIndex((message) => message.editing);
const newMessages = [...messages.slice(0, index), {
id: `message-${Date.now()}`,
...chatInputToMessage(props),
}];
setMessages(newMessages);
}, [messages]);
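    // Remove a reference from the message that is currently being edited.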
const handleEditingReferenceDelete = useCallback((item) => {
const newMessages = messages.map((message) => {
            if (message.editing) {
                return { ...message, references: message.references.filter((ref) => ref.id !== item.id) };
            }
return message;
});
setMessages(newMessages);
}, [messages]);
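    // Render an AIChatInput in place of a message while it is being edited.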
const messageEditRender = useCallback((props) => {
return (
<AIChatInput
style={editingInputOuterStyle}
generating={false}
references={props.references}
uploadProps={{ ...uploadProps, defaultFileList: props.attachments }}
                defaultContent={props.inputContents?.[0]?.text}
renderConfigureArea={renderLeftMenu}
// onContentChange={onContentChange}
onMessageSend={onEditMessageSend}
onReferenceDelete={handleEditingReferenceDelete}
/>
);
}, [messages, handleEditingReferenceDelete]);
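    // Show the sidebar for new content; toggle it when the same content is requested again.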
const changeSideBarContent = useCallback((content) => {
setSideBarContent((oldContent) => {
if (!simpleIsEqual(content, oldContent)) {
setSideBarVisible(true);
} else {
setSideBarVisible(v => !v);
}
return content;
});
    }, []);
const onAnnotationClick = useCallback((annotations) => {
changeSideBarContent({
type: 'annotation',
value: annotations
});
}, [changeSideBarContent]);
const toggleSideBar = useCallback(() => {
setSideBarVisible(v => !v);
}, []);
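    // Sidebar header: a title that depends on the content type, plus a close button.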
const renderSideBarTitle = useCallback((content) => {
const { type, value } = content;
        return <div style={{ display: 'flex', alignItems: 'center', justifyContent: 'space-between', padding: 12, color: 'var(--semi-color-text)' }}>
{type === 'annotation' && <div style={{ fontSize: '16px', lineHeight: '22px', fontWeight: 600 }}>References</div>}
{type === 'resource' && <div style={{ fontSize: '16px', lineHeight: '22px', fontWeight: 600 }}>Product List</div>}
<Button onClick={toggleSideBar} theme="borderless" type="tertiary" icon={<IconClose />} style={{ padding: '0px', width: 24, height: 24 }} />
</div>;
}, [toggleSideBar]);
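    // Sidebar body: an annotation list for references, or a file card for a resource.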
const renderSideBarBody = useCallback((content) => {
const { type, value = {} } = content;
if (type === 'annotation') {
return <div style={{ display: 'flex', flexDirection: 'column', rowGap: '12px', padding: '12px' }} >
{value.map((item, index) => (<div key={index} style={{ display: 'flex', flexDirection: 'column', rowGap: '8px' }} >
                    <span style={{ display: 'flex', alignItems: 'center', columnGap: 4 }}>
                        <img style={{ width: 20, height: 20, borderRadius: '50%' }} src={item.logo} alt="" />
<span style={{ fontSize: '14px', lineHeight: '20px', fontWeight: 600, color: 'var(--semi-color-text-0)' }}>{item.title}</span>
</span>
<Typography.Paragraph ellipsis={{ rows: 3 }} style={{ fontSize: '12px', lineHeight: '16px', color: 'var(--semi-color-text-1)' }} >{item.detail}</Typography.Paragraph>
</div>))}
</div>;
} else if (type === 'resource') {
return <div style={{ display: 'flex', flexDirection: 'column', rowGap: '12px', padding: '12px' }} >
<div style={{ display: 'flex', gap: 12, alignItems: 'center', }}>
<IconWord style={{ color: 'var(--semi-color-primary)' }} size='extra-large' /> {value.name}
</div>
</div>;
}
        return <div></div>;
}, []);
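    // Custom renderer for 'resource' content items: a clickable file card that opens the sidebar.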
const customRender = {
"resource": (item, message) => {
return <div
style={{
display: 'flex',
gap: 8,
backgroundColor: 'var(--semi-color-fill-0)',
padding: '12px 16px',
justifyContent: 'center',
alignItems: 'center',
borderRadius: '12px',
cursor: 'pointer'
}}
onClick={() => {
changeSideBarContent({
type: 'resource',
value: item
});
}}
>
<IconWord style={{ color: 'var(--semi-color-primary)' }} />
{item.name}
</div>;
},
};
return (
<div style={{ display: 'flex', columnGap: 10 }}>
<div style={{ display: 'flex', flexDirection: 'column', height: 'calc(100vh - 32px)', overflow: 'hidden', flexGrow: 1 }}>
<AIChatDialogue
style={dialogueOuterStyle}
roleConfig={roleConfig}
showReference={true}
align="leftRight"
mode="bubble"
chats={messages}
onChatsChange={onChatsChange}
onReferenceClick={onReferenceClick}
messageEditRender={messageEditRender}
onAnnotationClick={onAnnotationClick}
renderDialogueContentItem={customRender}
/>
<AIChatInput
style={inputOuterStyle}
placeholder={'Enter content or upload content'}
                defaultContent={'I am a <input-slot placeholder="[Profession]">programmer</input-slot>, please help me implement <input-slot placeholder="[Requirement Description]">a chat application in a Multi-Agent scenario</input-slot>'}
generating={generating}
references={references}
uploadProps={uploadProps}
renderConfigureArea={renderLeftMenu}
onContentChange={onContentChange}
onMessageSend={onMessageSend}
onStopGenerate={() => setGenerating(false)}
onReferenceDelete={handleReferenceDelete}
/>
</div>
{sideBarVisible && <div
                style={{ flexShrink: 0, width: 300, height: 'calc(100vh - 32px)', borderRadius: '12px', border: '1px solid var(--semi-color-border)' }}
>
{renderSideBarTitle(sideBarContent)}
{renderSideBarBody(sideBarContent)}
</div>}
</div>
);
}
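// Mock conversation: a user question followed by replies from several assistant agents (PM / UI / FE).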
const defaultMessages = [{
id: '1',
role: 'user',
content: 'I want to develop a chat application for a multi-agent scenario. Can you help me design it?',
status: 'completed',
}, {
id: '2',
role: 'assistant',
name: 'PM',
content: [{
type: 'message',
content: [{
type: 'input_text',
text: 'Received. To ensure the feasibility of the solution, I will first clarify the goals and scope:\n\n- Goal: Support multi-agent collaborative replies, where users can choose an agent or the system can automatically assign one.\n- MVP Features:\n 1) Basic conversation (text/image/file)\n 2) Agent identification and avatar\n 3) Input in progress and streaming output\n 4) Display of cited sources and tool results\n- Constraints: Focus on single-session implementation first, without cloud persistence; prioritize mobile adaptation.\n\nNext, I will organize the PRD key points and share them with the design and front-end teams.',
annotations: [
{
title: 'Semi Design',
url: 'https://semi.design/en-US/start/getting-started',
detail: 'Semi Design is a design system designed, developed, and maintained by the Douyin front-end team and the MED product design team. As a comprehensive, user-friendly, and high-quality modern application UI solution, Semi Design is derived from the complex scenarios across ByteDance various business lines. It currently supports nearly a thousand platform products and serves over 100,000 internal and external users.',
logo: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/card-meta-avatar-docs-demo.jpg'
},
{
title: 'Semi DSM',
url: 'https://semi.design/en-US/start/getting-started',
detail: 'Semi DSM supports global and component-level style customization and maintains synchronization between Figma and online code. Using DSM, Semi Design can be adapted to Any Design.',
logo: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/card-meta-avatar-docs-demo.jpg'
},
{
title: 'Semi D2C',
url: 'https://semi.design/en-US/start/getting-started',
detail: 'Semi D2C offers out-of-the-box design-to-code conversion: it supports one-click recognition of layer layouts and design system components in Figma pages, reproducing design drafts pixel-perfectly and translating them into React JSX and CSS code. Furthermore, it provides rich extensibility, allowing teams to quickly create their own custom design and development collaboration tools based on a custom plugin system.',
logo: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/card-meta-avatar-docs-demo.jpg'
}
],
}],
}],
}, {
id: '3',
role: 'assistant',
name: 'PM',
content: [{
type: 'message',
content: [{
type: 'input_text',
text: 'The generated PRD is as follows. The designer will first use this summary to create the information architecture and key pages.',
}, {
type: 'resource',
name: 'PRD.doc',
size: '100KB',
}]
}],
}, {
id: '4',
role: 'assistant',
name: 'UI',
content: [{
id: "rs_02175871288540800000000000000000000ffffac1598778c9aa5",
type: "reasoning",
summary: [
{
"type": "summary_text",
"text": "\nBased on the PRD provided by the product manager, I need to draw the key pages."
}
],
status: "completed"
}, {
type: 'function_call',
name: 'paint_key_pages',
arguments: "{\"file\":\"PRD\"}",
status: 'completed',
}, {
type: 'message',
content: [{
"type": "output_text",
"text": `The initial design draft is as follows:\n\n- Information Architecture: Dialogue Page (History List | Message Flow | Tool Card Area)\n- Visuals: The left side displays Agent avatars and name tags, with color blocks distinguishing roles\n- Interaction:\n- Input area supports quick switching and suggestion prompts using @Agent\n- During streaming output, a typing bubble and progress placeholder are displayed\n- Tool results are inserted in the form of cards/step bars, which can be expanded for details and copied\n\nI'll start with a low-fidelity wireframe; high-fidelity and animation details will be added later.`,
}],
status: "completed"
}],
status: 'completed',
}, {
id: '5',
role: 'assistant',
name: 'FE',
content: `Technical Solution Suggestions:\n\n- Technology Stack: React + Semi UI, backend using WebSocket or SSE to support streaming responses\n- Data Model: Messages include fields such as id, role, name, content, status, and references\n- Component Splitting: AIChatInput + AIChatDialogue; content rendered using Markdown, supporting image and file clicks\n- Performance: Virtual list and scroll-to-bottom; long text chunked rendering; lazy loading of images\n- Observability: Message tracking latency, error rate, and tool call time\n\nIf confirmed, I can first build the page skeleton and integrate mock data for integration testing.`,
}];
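// Maps each role (and each named assistant) to a display name and avatar.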
const roleConfig = {
user: {
name: 'User',
avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/22606991eh7uhfups/img/user.png'
},
assistant: new Map([
['PM', {
name: 'Product Manager',
avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/22606991eh7uhfups/PM.png'
}],
['UI', {
name: 'Designer',
avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/22606991eh7uhfups/UI.png'
}],
['FE', {
name: 'Front-end programmer',
avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/22606991eh7uhfups/FE.png'
}],
]),
};
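// Upload configuration shared by the main input and the editing input.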
const uploadProps = {
action: "https://api.semi.design/upload"
};
const modelOptions = [
{
value: 'GPT-5',
label: 'GPT-5',
type: 'gpt',
},
{
value: 'GPT-4o',
label: 'GPT-4o',
type: 'gpt',
},
{
value: 'Claude 3.5 Sonnet',
label: 'Claude 3.5 Sonnet',
type: 'claude',
},
];
const mcpOptions = [
{
icon: <IconFeishuLogo />,
label: "Lark Doc",
value: "feishu",
},
{
icon: <IconGit />,
label: "Github Mcp",
value: "github",
},
{
icon: <IconFigma />,
label: "IconFigma Mcp",
value: "IconFigma",
}
];
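// Thinking-mode options rendered as radio buttons in the configure area.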
const radioButtonProps = [
{ label: <IconTemplateStroked />, value: 'fast' },
{ label: <IconSearch />, value: 'think' }
];
render(AIChatInputWithDialogue);