Compare commits

..

171 Commits

Author SHA1 Message Date
yyh
3232548880 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-15 12:58:50 +08:00
yyh
471106cef1 refactor(workflow): unify llm model issue checks 2026-03-13 21:54:25 +08:00
yyh
0c42c11d28 fix(workflow): scope llm model warning dot 2026-03-13 21:47:57 +08:00
yyh
03c58d151a test: strengthen model provider header coverage 2026-03-13 18:40:29 +08:00
yyh
fcc8e79733 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-13 18:19:14 +08:00
yyh
21cb5ae1b6 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-13 15:36:25 +08:00
yyh
a985be6282 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-13 15:11:39 +08:00
yyh
d0054e28fe Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-12 23:12:06 +08:00
CodingOnStar
a1410dc531 test: add helper text visibility tests for model selector popup
- Implemented tests to verify the display of the compatible-only helper text based on the presence of scope features.
- Updated the Popup component to conditionally render a banner when scope features are applied.
- Added localization for the new helper text in English, Japanese, and Simplified Chinese.
2026-03-12 16:53:04 +08:00
yyh
e2f433bab9 test(web): add coverage for workflow plugin install flows 2026-03-12 16:07:50 +08:00
CodingOnStar
64a66f2adc Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-12 15:51:42 +08:00
yyh
e407d688d2 fix test 2026-03-12 15:45:24 +08:00
yyh
2f85c77a54 fix tests 2026-03-12 15:37:33 +08:00
yyh
906852fbd6 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-12 15:34:35 +08:00
yyh
729e18a7d6 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing
# Conflicts:
#	web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-label.tsx
#	web/eslint-suppressions.json
2026-03-12 15:26:16 +08:00
autofix-ci[bot]
4ed49f1d98 [autofix.ci] apply automated fixes 2026-03-12 07:03:15 +00:00
yyh
04f4627f9b refactor: extract providerSupportsCredits into shared utility
Unify the credits-support check across useCredentialPanelState and
model-selector popup by extracting a pure providerSupportsCredits
function. This also fixes the popup's credits-exhausted alert firing
for non-trial providers like Minimax and ZHIPU AI.
2026-03-12 15:00:29 +08:00
CodingOnStar
c167ee199c feat: implement dynamic plugin card icon URL generation
Added a utility function to generate plugin card icon URLs based on the plugin's source and workspace context. Updated the Card component to utilize this function for determining the correct icon source. Enhanced unit tests to verify the correct URL generation for both marketplace and package icons.
2026-03-12 14:58:16 +08:00
yyh
339a8ca057 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-12 14:46:16 +08:00
yyh
d39f243a4a fix: check trial_models list before treating provider as credits-supported
Providers like Minimax and ZHIPU AI have system_configuration.enabled=true
(free hosting quota) but are not in the trial_models list, so they should
not display "AI credits in use" or the Usage Priority switcher.
2026-03-12 14:45:48 +08:00
CodingOnStar
911d52cafc fix: unify model status display across knowledge base and model triggers 2026-03-12 14:01:22 +08:00
yyh
fee6d13f44 fix: improve workspace edit modal UX 2026-03-12 11:56:16 +08:00
yyh
cb8e20786a fix: remove aria-hidden from version switch icon
The icon conveys interactive meaning (switch version), so it should
not be hidden from assistive technologies.
2026-03-12 11:15:34 +08:00
CodingOnStar
d27a737cd1 test: add unit tests for RerankingModelSelector component 2026-03-12 11:12:25 +08:00
yyh
167fcc866d fix: use short "not configured" label for inline embedding model warning
Split embedding model validation message: checklist keeps full
"Embedding model not configured" while node inline row uses short
"Not configured" since the left label already says "Embedding model".
Also keep the row label color as tertiary gray instead of warning yellow.
2026-03-12 10:57:07 +08:00
CodingOnStar
82ad93eb1a Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-12 10:11:57 +08:00
yyh
f0086888e3 fix 2026-03-11 21:17:35 +08:00
yyh
ee2280851d fix: checklist popover 2026-03-11 21:07:33 +08:00
yyh
e9d0c7bb2a fix 2026-03-11 21:00:55 +08:00
yyh
06e1d59e1d chore: try 6 shard web tests ci 2026-03-11 20:53:17 +08:00
yyh
bd2bb27faa fix 2026-03-11 20:47:48 +08:00
yyh
c08b9a289b fix: tests 2026-03-11 20:42:40 +08:00
yyh
715a0fabfc fix: tests 2026-03-11 20:28:37 +08:00
yyh
5d07ccce59 fix: tests 2026-03-11 20:08:46 +08:00
autofix-ci[bot]
45e4d47207 [autofix.ci] apply automated fixes 2026-03-11 11:49:49 +00:00
yyh
fa664ebe77 refactor(web): migrate members settings overlays to base ui primitives 2026-03-11 19:39:05 +08:00
yyh
563d0c6892 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing
# Conflicts:
#	web/contract/router.ts
2026-03-11 19:02:56 +08:00
yyh
af2a6b2de0 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-11 18:19:41 +08:00
yyh
908e57b9f5 refactor: align Model Settings popover with Figma design
Restructure the popover layout to match design specs: add header with
close button, anchor popup to settings icon, change trigger to semantic
button, and widen panel to 400px.
2026-03-11 17:22:46 +08:00
yyh
d72fbce31c refactor: migrate PresetsParameter and ParameterItem to base/ui overlay primitives
Replace deprecated Dropdown, SimpleSelect, and Tooltip with DropdownMenu,
Select, and Tooltip compound components from base/ui. Hoist TONE_ICONS to
module level, remove FC in favor of function declarations, and prune
obsolete ESLint suppressions.
2026-03-11 16:54:14 +08:00
yyh
6cb68b6de5 fix: hide arrow-down chevron in trigger when status badge is shown 2026-03-11 16:46:18 +08:00
yyh
aeaf6d2ce9 fix: make model provider title sticky in selector dropdown
Add sticky positioning to provider title rows so they remain visible
while scrolling through models. Remove top padding from list container
to prevent the first provider title from shifting up before sticking.
2026-03-11 16:44:11 +08:00
yyh
ad4cb51983 refactor(trigger): derive multi-state status from credentials instead of collapsed disabled boolean
Replace the single `disabled` prop with a pure `deriveTriggerStatus` function
that maps to distinct states (empty, active, credits-exhausted, api-key-unavailable,
incompatible), each with its own badge text and tooltip. Unify non-workflow and
workflow modes into a single split layout, migrate icons to CSS icons, and add
per-status i18n tooltip keys.
2026-03-11 16:37:12 +08:00
CodingOnStar
3f27c8a9d2 fix(plugin-tasks): handle error actions by source and clear item after marketplace install 2026-03-11 15:59:37 +08:00
CodingOnStar
c2def7a840 fix: enhance model provider popup functionality and loading state handling
- Updated the model provider popup to include loading state for marketplace plugins.
- Improved filtering logic for installed models and marketplace providers.
- Added tests to ensure correct behavior when no models are found and when query parameters are omitted.
- Refactored the handling of model lists to better manage installed and available models.
2026-03-11 15:29:47 +08:00
CodingOnStar
f18fd566ba feat: implement model status mapping and enhance UI components
- Added a new status-mapping file to define internationalization keys for model statuses.
- Updated ModelName and Trigger components to conditionally display model metadata based on status.
- Enhanced tests for ModelSelectorTrigger to validate rendering behavior for different credential panel states.
- Improved styling and tooltip integration for status badges in the Trigger component.
2026-03-11 14:36:47 +08:00
yyh
0acc2eaa00 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-11 14:26:14 +08:00
yyh
e0947a1ea8 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing
# Conflicts:
#	web/eslint-suppressions.json
2026-03-11 14:23:04 +08:00
yyh
e51162af0c Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-11 11:57:11 +08:00
yyh
08da390678 fix: use destructive text color for api-unavailable credential name and remove redundant Unavailable label
The card-level StatusLabel now shows a red credential name for the
api-unavailable variant to match the Figma design. The "Unavailable"
text was removed since it only belongs inside the dropdown key list.
2026-03-11 11:56:50 +08:00
yyh
250450a54e fix: use primary button variant for api-required-add credential state
Align the "Add API Key" button to Figma design by switching from
secondary-accent to primary variant (blue bg + white text) for
providers with no AI credits and no API key configured.
2026-03-11 11:40:40 +08:00
CodingOnStar
5709a34a7f test: enhance ModelSelectorTrigger tests and integrate credential panel state
- Added tests for ModelSelectorTrigger to validate rendering based on credential panel state, including handling of credits exhausted scenarios.
- Updated ModelSelectorTrigger component to utilize useCredentialPanelState for determining status and rendering appropriate UI elements.
- Adjusted related tests to ensure correct behavior when model quota is exceeded and when the selected model is readonly.
- Improved styling for credits exhausted badge in the component.
2026-03-11 11:09:03 +08:00
CodingOnStar
e8ade9ad64 test(debug): add unit tests for Debug component and enhance Trigger component tests
- Introduced comprehensive unit tests for the Debug component, covering various states and interactions.
- Enhanced Trigger component tests to include new status badges, empty states, and improved rendering logic.
- Updated mock implementations to reflect changes in provider context and credential panel state.
- Ensured tests validate the correct rendering of UI elements based on different props and states.
2026-03-11 09:49:09 +08:00
CodingOnStar
34a5645d94 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-11 09:47:17 +08:00
yyh
5e80a3f5de fix: use css icons 2026-03-11 00:04:31 +08:00
yyh
785e04816e Revert "chore: refresh vinext lockfile"
This reverts commit 7699b0d430.
2026-03-10 23:50:22 +08:00
yyh
2704688f59 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-10 23:42:58 +08:00
yyh
7699b0d430 chore: refresh vinext lockfile 2026-03-10 23:42:56 +08:00
yyh
45c96dc254 feat(model-provider): add plugin update indicators and migrate to oRPC contracts
Problem: Model provider settings page (/plugins?action=showSettings&tab=provider)
was missing plugin update indicators (red dot badge, Update button) that the
/plugins page correctly displayed, because it only fetched installation data
without querying for latest marketplace versions.

Decision: Extract a shared usePluginsWithLatestVersion hook and migrate plugin
API endpoints to oRPC contracts, ensuring both pages use identical data flows.

Model: Both pages now follow the same pattern — fetch installed plugins via
consoleQuery.plugins.checkInstalled, enrich with latest version metadata via
usePluginsWithLatestVersion, then pass complete PluginDetail objects downstream
where useDetailHeaderState computes hasNewVersion for UI indicators.

Impact:
- Update badge red dot and Update button now appear on provider settings page
- Shared hook eliminates 15 lines of duplicate enrichment logic in plugins-panel
- oRPC contracts replace legacy post() calls for plugin endpoints
- Operation dropdown uses auto-width to prevent "View on Marketplace" text wrapping
- Version badge aligned to use Badge component consistently across both pages
- Update button tooltip added with bilingual i18n support
- Deprecated Tooltip migrated to Base UI Tooltip in detail-header
2026-03-10 23:28:09 +08:00
yyh
3a957cc28b Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-10 19:03:49 +08:00
CodingOnStar
7ed7562be6 feat(model-selector): add status badges and empty states for model trigger
- Add credits exhausted and API key unavailable split layout using useCredentialPanelState
  - Replace deprecated AlertTriangle icon with Incompatible badge and tooltip
  - Add empty state with brain icon placeholder and configure model text
  - Move STATUS_I18N_KEY to declarations.ts as shared constant
  - Redesign HasNotSetAPI as inline card layout, remove WarningMask overlay
  - Move no-API-key warning inline in debug panel, add no-model-selected state
  - Add i18n keys for en-US, ja-JP, zh-Hans
2026-03-10 18:02:14 +08:00
yyh
fda5d12107 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-10 17:41:31 +08:00
yyh
0b2ded3227 feat(knowledge-base): add fine-grained embedding model validation with inline warnings
Extract validation logic from default.ts into shared utils.ts, enabling
node card, panel, and checklist to share the same validation rules.
Introduce provider-scoped model list queries to detect non-active model
states (noConfigure, quotaExceeded, credentialRemoved, incompatible).
Expand node card from 2 rows to 4 rows with per-row warning indicators,
and add warningDot support to panel field titles.
2026-03-10 17:25:27 +08:00
yyh
369e4eb7b0 fix(model-selector): use native button elements for Base UI trigger components
Replace <div> with <button type="button"> in PopoverTrigger and
TooltipTrigger render props to satisfy Base UI's nativeButton
requirement and restore proper button semantics.
2026-03-10 16:41:16 +08:00
yyh
a4942139d2 chore(model-selector): remove redundant z-index hacks after overlay unification
Now that base/ui primitives carry z-[1002] by default (#33185),
the per-call-site overrides (z-[1002] on ModelSelector, z-[1003]
on nested PopupItem dropdown) are no longer needed — DOM order
handles stacking for same-z-index portals.
2026-03-10 14:05:09 +08:00
yyh
83c15227f6 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-10 13:58:19 +08:00
yyh
60f86f0520 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-10 11:43:09 +08:00
yyh
b3c98e417d Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-09 23:47:27 +08:00
yyh
dfe389c017 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-09 23:42:04 +08:00
yyh
b364b06e51 refactor(model-selector): migrate overlays to Popover/Tooltip and unify trigger component
- Migrate PortalToFollowElem to base-ui Popover in model-selector,
  model-parameter-modal, and plugin-detail-panel model-selector
- Migrate legacy Tooltip to compound Tooltip in popup-item and trigger
- Unify EmptyTrigger, ModelTrigger, DeprecatedModelTrigger into a
  single declarative ModelSelectorTrigger that derives state from props
- Remove showDeprecatedWarnIcon boolean prop anti-pattern; deprecated
  state always renders warn icon as part of component's visual contract
- Remove deprecatedClassName prop; component manages disabled styling
- Replace manual triggerRef width measurement with CSS var(--anchor-width)
- Remove tooltip scroll listener (base-ui auto-tracks anchor position)
- Restore conditional placement for workflow mode in plugin-detail-panel
- Prune stale ESLint suppressions for removed deprecated imports
2026-03-09 23:34:42 +08:00
CodingOnStar
ce0197b107 fix(provider): handle undefined provider in credential status and panel state 2026-03-09 18:20:02 +08:00
yyh
164cefc65c Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-09 17:41:13 +08:00
yyh
f6d80b9fa7 fix(workflow): derive plugin install state in render
Remove useEffect-based sync of _pluginInstallLocked/_dimmed in workflow nodes to avoid render-update loops.

Move plugin-missing checks to pure utilities and use them in checklist.
Optimize node installation hooks by enabling only relevant queries and narrowing memo dependencies.
2026-03-09 17:18:09 +08:00
yyh
e845fa7e6a fix(plugin-install): support bundle marketplace dependency shape 2026-03-09 17:07:27 +08:00
yyh
bab7bd5ecc Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-09 17:03:54 +08:00
yyh
cfb02bceaf feat(workflow): open install bundle from checklist and strict marketplace parsing 2026-03-09 17:03:43 +08:00
yyh
694ca840e1 feat(web): add warning dot indicator on LLM panel field labels synced with checklist
Store checklist items in zustand WorkflowStore so both the checklist UI
and node panels share a single source of truth. The LLM panel reads from
the store to show a Figma-aligned warning dot (absolute-positioned, no
layout shift) on the MODEL field label when the node has checklist warnings.
2026-03-09 16:38:31 +08:00
yyh
2d979e2cec fix(web): silence toast for model parameter rules fetch on missing provider
Add silent option to useModelParameterRules API call so uninstalled
provider errors are swallowed instead of surfacing a raw backend toast.
2026-03-09 16:17:09 +08:00
yyh
5cee7cf8ce feat(web): add LLM model plugin check to workflow checklist
Detect uninstalled model plugins for LLM nodes in the checklist and
publish-gate. Migrate ChecklistItem.errorMessage to errorMessages[]
so a single node can surface multiple validation issues at once.

- Extract shared extractPluginId utility for checklist and prompt editor
- Build installed-plugin Set (O(1) lookup) from ProviderContext
- Remove short-circuit between checkValid and variable validation
- Sync the same check into handleCheckBeforePublish
- Adapt node-group, use-last-run, and test assertions
2026-03-09 16:16:16 +08:00
yyh
0c17823c8b fix 2026-03-09 15:38:46 +08:00
yyh
49c6696d08 fix: use css icons 2026-03-09 15:27:08 +08:00
yyh
292c98a8f3 refactor(web): redesign workflow checklist panel with grouped tree view and Popover primitive
Migrate checklist from flat card list using deprecated PortalToFollowElem to
grouped tree view using base-ui Popover. Split into checklist/ directory with
separate components: plugin group with batch install, per-node groups with
sub-items and "Go to fix" hover action, and tree-line SVG indicators.
2026-03-09 15:23:34 +08:00
CodingOnStar
0e0a6ad043 test(web): enhance unit tests for credential and popup components
- Updated tests for CredentialItem to improve delete button interaction and check icon rendering.
- Enhanced PopupItem tests by mocking credential panel state for various scenarios, ensuring accurate rendering based on credit status.
- Adjusted Popup tests to include trial credits mock for better coverage of credit management logic.
- Refactored model list item tests to include wrapper for consistent rendering context.
2026-03-09 14:20:12 +08:00
yyh
456c95adb1 refactor(web): trigger error tooltip on entire variable badge hover 2026-03-09 14:03:52 +08:00
yyh
1abbaf9fd5 feat(web): differentiate invalid variable tooltips by model plugin status
Replace the generic "Invalid variable" message in prompt editor variable
labels with two context-aware messages: one for missing nodes and another
for uninstalled model plugins. Add useLlmModelPluginInstalled hook that
checks LLM node model providers against installed providers via
useProviderContextSelector. Migrate Tooltip usage to base-ui primitives
and replace RiErrorWarningFill with Warning icon in warning color.
2026-03-09 14:02:26 +08:00
CodingOnStar
1a26e1669b refactor(web): streamline PopupItem component for credit management
- Removed unused context and variables related to workspace and custom configuration.
- Simplified credit usage logic by leveraging state management for better clarity and performance.
- Enhanced readability by restructuring the code for determining credit status and API key activity.
2026-03-09 13:10:29 +08:00
CodingOnStar
02444af2e3 feat(web): enhance Popup and CreditsFallbackAlert components for better credit management
- Integrated trial credits check in the Popup component to conditionally display the CreditsExhaustedAlert.
- Updated the CreditsFallbackAlert to show a message only when API keys are unavailable.
- Removed the fallback description from translation files as it is no longer used.
2026-03-09 12:57:41 +08:00
CodingOnStar
56038e3684 feat(web): update credits fallback alert to include new description for no API keys
- Modified the CreditsFallbackAlert component to display a different message based on the presence of API keys.
- Added a new translation key for the fallback description in both English and Chinese JSON files.
2026-03-09 12:34:41 +08:00
CodingOnStar
eb9341e7ec feat(web): integrate CreditsCoin icon in PopupItem for enhanced UI
- Replaced the existing credits coin span with the CreditsCoin component for improved visual consistency.
- Updated imports to include the new CreditsCoin icon component.
2026-03-09 12:28:13 +08:00
CodingOnStar
e40b31b9c4 refactor(web): enhance model selector functionality and improve UI consistency
- Removed unnecessary ESLint suppressions for better code quality.
- Updated the ModelParameterModal and ModelSelector components to ensure consistent class ordering.
- Added onHide prop to ModelSelector for better control over dropdown visibility.
- Introduced useChangeProviderPriority hook to manage provider priority changes more effectively.
- Integrated CreditsExhaustedAlert in the Popup component to handle API key status more gracefully.
2026-03-09 12:24:54 +08:00
yyh
b89ee4807f Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing
# Conflicts:
#	web/app/components/header/account-setting/model-provider-page/index.spec.tsx
#	web/app/components/header/account-setting/model-provider-page/model-modal/index.spec.tsx
#	web/app/components/header/account-setting/model-provider-page/model-selector/popup-item.spec.tsx
#	web/app/components/header/account-setting/model-provider-page/model-selector/popup.spec.tsx
#	web/app/components/header/account-setting/model-provider-page/provider-added-card/credential-panel.spec.tsx
#	web/app/components/header/account-setting/model-provider-page/provider-added-card/index.spec.tsx
#	web/app/components/header/account-setting/model-provider-page/provider-added-card/quota-panel.spec.tsx
#	web/app/components/header/account-setting/model-provider-page/system-model-selector/index.spec.tsx
2026-03-09 12:12:27 +08:00
yyh
9907cf9e06 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-08 22:27:42 +08:00
yyh
208a31719f Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-08 01:10:51 +08:00
yyh
3d1ef1f7f5 Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-06 21:45:37 +08:00
CodingOnStar
24b14e2c1a Merge remote-tracking branch 'origin/main' into feat/model-plugins-implementing 2026-03-06 19:00:17 +08:00
CodingOnStar
53f122f717 Merge branch 'feat/model-provider-refactor' into feat/model-plugins-implementing 2026-03-06 17:33:38 +08:00
CodingOnStar
fced2f9e65 refactor: enhance plugin management UI with error handling, improved rendering, and new components 2026-03-06 16:27:26 +08:00
yyh
0c08c4016d Merge remote-tracking branch 'origin/main' into feat/model-provider-refactor 2026-03-06 14:57:48 +08:00
CodingOnStar
ff4e4a8d64 refactor: enhance model trigger component with internationalization support and improved tooltip handling 2026-03-06 14:50:23 +08:00
yyh
948efa129f Merge remote-tracking branch 'origin/main' into feat/model-provider-refactor 2026-03-06 14:47:56 +08:00
CodingOnStar
e371bfd676 refactor: enhance model provider management with new icons, improved UI elements, and marketplace integration 2026-03-06 14:18:29 +08:00
yyh
6d612c0909 test: improve Jotai atom test quality and add model-provider atoms tests
Replace dynamic imports with static imports in marketplace atom tests.
Convert type-only and not-toThrow assertions into proper state-change
verifications. Add comprehensive test suite for model-provider-page
atoms covering all four hooks, cross-hook interaction, selectAtom
granularity, and Provider isolation.
2026-03-05 22:49:09 +08:00
yyh
56e0dc0ae6 trigger ci
Signed-off-by: yyh <yuanyouhuilyz@gmail.com>
2026-03-05 21:22:03 +08:00
yyh
975eca00c3 Merge remote-tracking branch 'origin/main' into feat/model-provider-refactor 2026-03-05 20:25:53 +08:00
yyh
f049bafcc3 refactor: simplify Jotai atoms by removing redundant write-only atoms
Replace 2 write-only derived atoms with primitive atom's built-in
updater functions. The selectAtom on the read side already prevents
unnecessary re-renders, making the manual guard logic redundant.
2026-03-05 20:25:29 +08:00
CodingOnStar
dd9c526447 refactor: update model-selector popup-item to support collapsible items and improve icon color handling 2026-03-05 16:45:37 +08:00
yyh
922dc71e36 fix 2026-03-05 16:17:38 +08:00
yyh
f03ec7f671 Merge branch 'main' into feat/model-provider-refactor 2026-03-05 16:14:36 +08:00
yyh
29f275442d Merge remote-tracking branch 'origin/main' into feat/model-provider-refactor
# Conflicts:
#	web/app/components/header/account-setting/model-provider-page/provider-added-card/credential-panel.spec.tsx
#	web/app/components/header/account-setting/model-provider-page/provider-added-card/credential-panel.tsx
#	web/app/components/header/account-setting/model-provider-page/system-model-selector/index.tsx
2026-03-05 16:13:40 +08:00
yyh
c9532ffd43 add stories 2026-03-05 15:55:21 +08:00
yyh
840dc33b8b Merge remote-tracking branch 'origin/main' into feat/model-provider-refactor 2026-03-05 15:12:32 +08:00
yyh
cae58a0649 Merge remote-tracking branch 'origin/main' into feat/model-provider-refactor 2026-03-05 15:08:13 +08:00
yyh
1752edc047 refactor(web): optimize model provider re-render and remove useEffect state sync
- Replace useEffect state sync with derived state pattern in useSystemDefaultModelAndModelList
- Use useCallback instead of useMemo for function memoization in useProviderCredentialsAndLoadBalancing
- Add memo() to ProviderAddedCard and CredentialPanel to prevent unnecessary re-renders
- Switch to useProviderContextSelector for precise context subscription in ProviderAddedCard
- Stabilize activate callback ref in useActivateCredential via supportedModelTypes ref
- Add usage priority tooltip with i18n support
2026-03-05 15:07:53 +08:00
yyh
7471c32612 Revert "temp: remove IS_CLOUD_EDITION guard from supportsCredits for local testing"
This reverts commit ab87ac333a.
2026-03-05 14:33:48 +08:00
yyh
2d333bbbe5 refactor(web): extract credential activation into hook and migrate credential-item overlays
Extract credential switching logic from dropdown-content into a dedicated
useActivateCredential hook with optimistic updates and proper data flow
separation. Credential items now stay visible in the popover after clicking
(no auto-close), show cursor-pointer, and disable during activation.

Migrate credential-item from legacy Tooltip and remixicon imports to
base-ui Tooltip and CSS icon classes, pruning stale ESLint suppressions.
2026-03-05 14:22:39 +08:00
yyh
4af6788ce0 fix(web): wrap Header test in Dialog context for base-ui compatibility 2026-03-05 14:20:35 +08:00
yyh
24b072def9 fix: lint 2026-03-05 14:08:20 +08:00
yyh
909c8c3350 Merge remote-tracking branch 'origin/main' into feat/model-provider-refactor 2026-03-05 13:58:51 +08:00
yyh
80e9c8bee0 refactor(web): make account setting fully controlled with action props 2026-03-05 13:39:36 +08:00
yyh
15b7b304d2 refactor(web): migrate model-modal overlays to base-ui Dialog and AlertDialog
Replace legacy PortalToFollowElem and Confirm with Dialog/AlertDialog
primitives. Remove manual ESC handler and backdrop div — now handled
natively by base-ui. Add backdropProps={{ forceRender: true }} for
correct nested overlay rendering.
2026-03-05 13:33:53 +08:00
yyh
61e2672b59 refactor(web): make provider reset event-driven and scope model invalidation
- remove provider-page lifecycle reset effect and handle reset in explicit tab/close actions
- switch account setting tab state to controlled/uncontrolled pattern without sync effect
- use provider-scoped model list queryKey with exact invalidation in credential and model toggle mutations
- update related tests and mocks for new behavior
2026-03-05 13:28:30 +08:00
yyh
5f4ed4c6f6 refactor(web): replace model provider emitter refresh with jotai state
- add atom-based provider expansion state with reset/prune helpers
- remove event-emitter dependency from model provider refresh flow
- invalidate exact provider model-list query key on refresh
- reset expansion state on model provider page mount/unmount
- update and extend tests for external expansion and query invalidation
- update eslint suppressions to match current code
2026-03-05 13:20:58 +08:00
yyh
4a1032c628 fix(web): remove redundant hover text swap on show models button
Merge the two hover-toggling divs into a single always-visible element
and remove the unused showModelsNum i18n key from all locales.
2026-03-05 13:16:04 +08:00
yyh
423c97a47e code style 2026-03-05 13:09:33 +08:00
yyh
a7e3fb2e33 fix(web): use triangle Warning icon instead of circle error icon
Replace i-ri-error-warning-fill (circle exclamation) with the
Warning component (triangle) for api-fallback and credits-fallback
variants to match Figma design.
2026-03-05 13:07:20 +08:00
yyh
ce34937a1c feat(web): add credits-fallback variant for API Key priority with available credits
When API Key is selected but unavailable/unconfigured and credits are
available, the card now shows "AI credits in use" with a warning icon
instead of "API key required". When both credits are exhausted and no
API key exists, it shows "No available usage" (destructive).

New deriveVariant logic for priority=apiKey:
- !exhausted + !authorized → credits-fallback (was api-required-*)
- exhausted + no credential → no-usage (was api-required-add)
- exhausted + named unauthorized → api-unavailable (unchanged)
2026-03-05 13:02:40 +08:00
yyh
ad9ac6978e fix(web): align alert card width with API key section in dropdown
Change mx-1 (4px) to mx-2 (8px) on CreditsFallbackAlert and
CreditsExhaustedAlert to match ApiKeySection's p-2 (8px) padding,
consistent with Figma design where both sections are 8px from the
dropdown edge.
2026-03-05 12:56:55 +08:00
yyh
57c1ba3543 fix(web): hide divider above empty API keys state in dropdown
Move the border from UsagePrioritySection (always visible) to
ApiKeySection's list variant (only when credentials exist). This
removes the unwanted divider line above the "No API Keys" empty
state card when on the AI Credits tab with no keys configured.
2026-03-05 12:25:11 +08:00
yyh
d7a5af2b9a Merge remote-tracking branch 'origin/main' into feat/model-provider-refactor
# Conflicts:
#	web/app/components/header/account-setting/model-provider-page/index.tsx
2026-03-05 10:46:24 +08:00
yyh
d45edffaa3 fix(web): wire upgrade link to pricing modal and add credits-coin icon
Replace broken HTML string interpolation with Trans component and
useModalContextSelector so "upgrade your plan" opens the pricing modal.
Add custom credits-coin SVG icon to replace the generic ri-coin-line.
2026-03-05 10:39:31 +08:00
yyh
530515b6ef fix(web): prevent model list from expanding on priority switch
Remove UPDATE_MODEL_PROVIDER_CUSTOM_MODEL_LIST event emission from
changePriority onSuccess. This event was designed for custom model
add/edit/delete scenarios where the card should expand, but firing
it on priority switch caused ProviderAddedCard to unexpectedly
expand via refreshModelList → setCollapsed(false).
2026-03-05 10:35:03 +08:00
yyh
f13f0d1f9a fix(web): align dropdown alerts with Figma design and fix hardcoded credits total
- Expose totalCredits from useTrialCredits hook instead of hardcoding 10,000
- Align CreditsExhaustedAlert with Figma: dynamic progress bar, correct
  design tokens (components-progress-error-bg/progress), sm-medium/xs-regular
  typography
- Align CreditsFallbackAlert typography to sm-medium/xs-regular
- Fix ApiKeySection empty state: horizontal gradient, sm-medium title,
  Figma-aligned padding (pl-7 for API KEYS label)
- Hoist empty credentials array constant to stabilize memo (rerender-memo-with-default-value)
- Remove redundant useCallback wrapper in ApiKeySection
- Replace nested ternary with Record lookup in TextLabel
- Remove dead || 0 guard in useTrialCredits
- Update all test mocks with totalCredits field
2026-03-05 10:09:51 +08:00
yyh
b597d52c11 refactor(web): remove dialog description from system model selector
Remove the DialogDescription and its i18n key (modelProvider.systemModelSettingsLink)
from the system model settings dialog across all 23 locales.
2026-03-05 10:05:01 +08:00
yyh
34c42fe666 Revert "temp: remove cloud condition"
This reverts commit 29e344ac8b.
2026-03-05 09:44:19 +08:00
yyh
dc109c99f0 test(web): expand credential panel and dropdown test coverage for all 8 card variants
Add comprehensive behavioral tests covering all discriminated union variants,
destructive/default styling, warning icons, CreditsFallbackAlert conditions,
credential CRUD interactions, AlertDialog delete confirmation, and Popover behavior.
2026-03-05 09:41:48 +08:00
yyh
223b9d89c1 refactor(web): migrate priority change to oRPC contract with useMutation
- Add changePreferredProviderType contract in model-providers.ts
- Register in consoleRouterContract
- Replace raw async changeModelProviderPriority with useMutation
- Use Toast.notify (static API) instead of useToastContext hook
- Pass isPending as isChangingPriority to disable buttons during switch
- Add disabled prop to UsagePrioritySection
- Fix pre-existing test assertions for api-unavailable variant
- Update all specs with isChangingPriority prop and oRPC mock pattern
2026-03-05 09:30:38 +08:00
yyh
dd119eb44f fix(web): align UsagePrioritySection with Figma design and fix i18n key ordering
- Single-row layout for icon, label, and option cards
- Icon: arrow-up-double-line matching design spec
- Buttons: flexible width with whitespace-nowrap instead of fixed w-[72px]
- Add min-w-0 + truncate for text overflow, focus-visible ring for a11y
- Sort modelProvider.card.* i18n keys alphabetically
2026-03-05 09:15:16 +08:00
yyh
970493fa85 test(web): update tests for credential panel refactoring and new ModelAuthDropdown components
Rewrite credential-panel.spec.tsx to match the new discriminated union
state model and variant-driven rendering. Add new test files for
useCredentialPanelState hook, SystemQuotaCard Label enhancement,
and all ModelAuthDropdown sub-components.
2026-03-05 08:41:17 +08:00
yyh
ab87ac333a temp: remove IS_CLOUD_EDITION guard from supportsCredits for local testing 2026-03-05 08:34:10 +08:00
yyh
b8b70da9ad refactor(web): rewrite CredentialPanel with declarative variant-driven state and new ModelAuthDropdown
- Extract useCredentialPanelState hook with discriminated union CardVariant type replacing scattered boolean conditions
- Create ModelAuthDropdown compound component (Popover-based) with UsagePrioritySection, CreditsExhaustedAlert, and ApiKeySection
- Enhance SystemQuotaCard.Label to accept className override for flexible styling
- Add i18n keys for new card states and dropdown content (en-US, zh-Hans)
2026-03-05 08:33:04 +08:00
yyh
77d81aebe8 Merge remote-tracking branch 'origin/main' into feat/model-provider-refactor 2026-03-04 23:35:20 +08:00
yyh
deb4cd3ece fix: i18n 2026-03-04 23:35:13 +08:00
yyh
648d9ef1f9 refactor(web): extract SystemQuotaCard compound component and shared useTrialCredits hook
Extract trial credits calculation into a shared useTrialCredits hook to prevent
logic drift between QuotaPanel and CredentialPanel. Add SystemQuotaCard compound
component with explicit default/destructive variants for the system quota UI
state in provider cards, replacing inline conditional styling with composable
Label and Actions slots. Remove unnecessary useMemo for simple derived values.
2026-03-04 23:30:25 +08:00
yyh
5ed4797078 fix 2026-03-04 22:53:29 +08:00
yyh
62631658e9 fix(web): update tests for AlertDialog migration and component API changes
- Replace deprecated Confirm mock with real AlertDialog role-based queries
- Add useInvalidateCheckInstalled mock for QueryClient dependency
- Wrap model-list-item renders in QueryClientProvider
- Migrate PluginVersionPicker from PortalToFollowElem to Popover
- Migrate UpdatePluginModal from Modal to Dialog
- Update version picker offset props (sideOffset/alignOffset)
2026-03-04 22:52:21 +08:00
yyh
22a4100dd7 fix(web): invalidate plugin checkInstalled cache after version updates 2026-03-04 22:33:17 +08:00
yyh
0f7ed6f67e refactor(web): align provider badges with figma and remove dead add-model-button 2026-03-04 22:29:51 +08:00
yyh
4d9fcbec57 refactor(web): migrate remove-plugin dialog to base UI AlertDialog and improve UX
- Replace deprecated Confirm component with AlertDialog primitives
- Add forceRender backdrop for proper overlay rendering
- Add success Toast notification after plugin removal
- Update "View Detail" text to "View on Marketplace" (en/zh-Hans)
- Add i18n keys for delete success message
- Prune stale eslint suppression for header-modals
2026-03-04 22:14:19 +08:00
yyh
4d7a9bc798 fix(web): align model provider cache invalidation with oRPC keys 2026-03-04 22:06:27 +08:00
yyh
d6d04ed657 fix 2026-03-04 22:03:06 +08:00
yyh
f594a71dae fix: icon 2026-03-04 22:02:36 +08:00
yyh
04e0ab7eda refactor(web): migrate provider-added-card model list to oRPC query-driven state 2026-03-04 21:55:34 +08:00
yyh
784bda9c86 refactor(web): migrate operation-dropdown to base UI and align provider card styles with Figma
- Migrate OperationDropdown from legacy portal-to-follow-elem to base UI DropdownMenu primitives
- Add placement, sideOffset, alignOffset, popupClassName props for flexible positioning
- Fix version badge font size: system-2xs-medium-uppercase (10px) → system-xs-medium-uppercase (12px)
- Set provider card dropdown to bottom-start placement with 192px width per Figma spec
- Fix PluginVersionPicker toggle: clicking badge now opens and closes the picker
- Add max-h-[224px] overflow scroll to version list
- Replace Remix icon imports with Tailwind CSS icon classes
- Prune stale eslint suppressions for migrated files
2026-03-04 21:55:23 +08:00
yyh
1af1fb6913 feat(web): add version badge and actions menu to provider cards
Integrate plugin version management into model provider cards by
reusing existing plugin detail panel hooks and components. Batch
query installed plugins at list level to avoid N+1 requests.
2026-03-04 21:29:52 +08:00
yyh
1f0c36e9f7 fix: style 2026-03-04 21:07:42 +08:00
yyh
455ae65025 fix: style 2026-03-04 20:58:14 +08:00
yyh
d44682e957 refactor(web): align quota panel with Figma design and migrate to base UI tooltip
- Rename title from "Quota" to "AI Credits" and update tooltip copy
  (Message Credits → AI Credits, free → Trial)
- Show "Credits exhausted" in destructive text when credits reach zero
  instead of displaying the number "0"
- Migrate from deprecated Tooltip to base UI Tooltip compound component
- Add 4px grid background with radial fade mask via CSS module
- Simplify provider icon tooltip text for uninstalled state
- Update i18n keys for both en-US and zh-Hans
2026-03-04 20:52:30 +08:00
yyh
8c4afc0c18 fix(model-selector): align empty trigger with default trigger style 2026-03-04 20:14:49 +08:00
yyh
539cbcae6a fix(account-settings): render nested system model backdrop via base ui 2026-03-04 19:57:53 +08:00
yyh
8d257fea7c chore(web): commit dialog overlay follow-up changes 2026-03-04 19:37:10 +08:00
yyh
c3364ac350 refactor(web): align account settings dialogs with base UI 2026-03-04 19:31:14 +08:00
yyh
f991644989 refactor(pricing): migrate to base ui dialog and extract category types 2026-03-04 19:26:54 +08:00
yyh
29e344ac8b temp: remove cloud condition 2026-03-04 18:50:38 +08:00
yyh
1ad9305732 fix(web): avoid quota panel flicker on account-setting tab switch
- remove mount-time workspace invalidate in model provider page

- read quota with useCurrentWorkspace and keep loading only for initial empty fetch

- reuse existing useSystemFeaturesQuery for marketplace and trial models

- update model provider and quota panel tests for new query/loading behavior
2026-03-04 18:43:01 +08:00
yyh
17f38f171d lint 2026-03-04 18:21:59 +08:00
yyh
802088c8eb test(web): fix trivial assertion and add useInvalidateDefaultModel tests
Replace the no-provider test assertion from checking a nonexistent i18n
key to verifying actual warning keys are absent. Add unit tests for
useInvalidateDefaultModel following the useUpdateModelList pattern.
2026-03-04 17:51:20 +08:00
yyh
cad6d94491 refactor(web): replace remixicon imports with Tailwind CSS icons in system-model-selector 2026-03-04 17:45:41 +08:00
yyh
621d0fb2c9 fix 2026-03-04 17:42:34 +08:00
yyh
a92fb3244b fix(web): skip top warning for no-provider state and remove unused i18n key
The empty state card below already prompts users to install a provider,
so the top warning bar is redundant for the no-provider case. Remove
the unused noProviderInstalled i18n key and replace the lookup map with
a ternary to preserve i18n literal types without assertions.
2026-03-04 17:39:49 +08:00
yyh
97508f8d7b fix(web): invalidate default model cache after saving system model settings
After saving system models, only the model list cache was invalidated
but not the default model cache, causing stale config status in the UI.
Add useInvalidateDefaultModel hook and call it for all 5 model types
after a successful save.
2026-03-04 17:26:24 +08:00
yyh
70e677a6ac feat(web): refine system model settings to 4 distinct config states
Replace the single `defaultModelNotConfigured` boolean with a derived
`systemModelConfigStatus` that distinguishes between no-provider,
none-configured, partially-configured, and fully-configured states,
each showing a context-appropriate warning message. Also updates the
button label from "System Model Settings" to "Default Model Settings"
and migrates remixicon imports to Tailwind CSS icon classes.
2026-03-04 16:58:46 +08:00
931 changed files with 18120 additions and 10746 deletions

View File

@@ -27,7 +27,7 @@ jobs:
persist-credentials: false
- name: Setup UV and Python
uses: astral-sh/setup-uv@e06108dd0aef18192324c70427afc47652e63a82 # v7.5.0
uses: astral-sh/setup-uv@6ee6290f1cbc4156c0bdd66691b2c144ef8df19a # v7.4.0
with:
enable-cache: true
python-version: ${{ matrix.python-version }}

View File

@@ -39,7 +39,7 @@ jobs:
with:
python-version: "3.11"
- uses: astral-sh/setup-uv@e06108dd0aef18192324c70427afc47652e63a82 # v7.5.0
- uses: astral-sh/setup-uv@6ee6290f1cbc4156c0bdd66691b2c144ef8df19a # v7.4.0
- name: Generate Docker Compose
if: steps.docker-compose-changes.outputs.any_changed == 'true'

View File

@@ -113,7 +113,7 @@ jobs:
context: "web"
steps:
- name: Download digests
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0
with:
path: /tmp/digests
pattern: digests-${{ matrix.context }}-*

View File

@@ -19,7 +19,7 @@ jobs:
persist-credentials: false
- name: Setup UV and Python
uses: astral-sh/setup-uv@e06108dd0aef18192324c70427afc47652e63a82 # v7.5.0
uses: astral-sh/setup-uv@6ee6290f1cbc4156c0bdd66691b2c144ef8df19a # v7.4.0
with:
enable-cache: true
python-version: "3.12"
@@ -69,7 +69,7 @@ jobs:
persist-credentials: false
- name: Setup UV and Python
uses: astral-sh/setup-uv@e06108dd0aef18192324c70427afc47652e63a82 # v7.5.0
uses: astral-sh/setup-uv@6ee6290f1cbc4156c0bdd66691b2c144ef8df19a # v7.4.0
with:
enable-cache: true
python-version: "3.12"

View File

@@ -28,7 +28,7 @@ jobs:
migration-changed: ${{ steps.changes.outputs.migration }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: dorny/paths-filter@fbd0ab8f3e69293af611ebaee6363fc25e6d187d # v4.0.1
- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
id: changes
with:
filters: |

View File

@@ -22,7 +22,7 @@ jobs:
fetch-depth: 0
- name: Setup Python & UV
uses: astral-sh/setup-uv@e06108dd0aef18192324c70427afc47652e63a82 # v7.5.0
uses: astral-sh/setup-uv@6ee6290f1cbc4156c0bdd66691b2c144ef8df19a # v7.4.0
with:
enable-cache: true

View File

@@ -33,7 +33,7 @@ jobs:
- name: Setup UV and Python
if: steps.changed-files.outputs.any_changed == 'true'
uses: astral-sh/setup-uv@e06108dd0aef18192324c70427afc47652e63a82 # v7.5.0
uses: astral-sh/setup-uv@6ee6290f1cbc4156c0bdd66691b2c144ef8df19a # v7.4.0
with:
enable-cache: false
python-version: "3.12"

View File

@@ -120,7 +120,7 @@ jobs:
- name: Run Claude Code for Translation Sync
if: steps.detect_changes.outputs.CHANGED_FILES != ''
uses: anthropics/claude-code-action@cd77b50d2b0808657f8e6774085c8bf54484351c # v1.0.72
uses: anthropics/claude-code-action@26ec041249acb0a944c0a47b6c0c13f05dbc5b44 # v1.0.70
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
github_token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -31,7 +31,7 @@ jobs:
remove_tool_cache: true
- name: Setup UV and Python
uses: astral-sh/setup-uv@e06108dd0aef18192324c70427afc47652e63a82 # v7.5.0
uses: astral-sh/setup-uv@6ee6290f1cbc4156c0bdd66691b2c144ef8df19a # v7.4.0
with:
enable-cache: true
python-version: ${{ matrix.python-version }}

View File

@@ -26,8 +26,8 @@ jobs:
strategy:
fail-fast: false
matrix:
shardIndex: [1, 2, 3, 4]
shardTotal: [4]
shardIndex: [1, 2, 3, 4, 5, 6]
shardTotal: [6]
defaults:
run:
shell: bash
@@ -77,7 +77,7 @@ jobs:
uses: ./.github/actions/setup-web
- name: Download blob reports
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0
with:
path: web/.vitest-reports
pattern: blob-report-*

3
.gitignore vendored
View File

@@ -237,6 +237,3 @@ scripts/stress-test/reports/
# settings
*.local.json
*.local.md
# Code Agent Folder
.qoder/*

View File

@@ -22,10 +22,10 @@ APP_WEB_URL=http://localhost:3000
# Files URL
FILES_URL=http://localhost:5001
# INTERNAL_FILES_URL is used by services running in Docker to reach the API file endpoints.
# For Docker Desktop (Mac/Windows), use http://host.docker.internal:5001 when the API runs on the host.
# For Docker Compose on Linux, use http://api:5001 when the API runs inside the Docker network.
INTERNAL_FILES_URL=http://host.docker.internal:5001
# INTERNAL_FILES_URL is used for plugin daemon communication within Docker network.
# Set this to the internal Docker service URL for proper plugin file access.
# Example: INTERNAL_FILES_URL=http://api:5001
INTERNAL_FILES_URL=http://127.0.0.1:5001
# TRIGGER URL
TRIGGER_URL=http://localhost:5001
@@ -180,7 +180,7 @@ CONSOLE_CORS_ALLOW_ORIGINS=http://localhost:3000,*
COOKIE_DOMAIN=
# Vector database configuration
# Supported values are `weaviate`, `oceanbase`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`, `hologres`.
# Supported values are `weaviate`, `oceanbase`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`.
VECTOR_STORE=weaviate
# Prefix used to create collection name in vector database
VECTOR_INDEX_NAME_PREFIX=Vector_index
@@ -217,20 +217,6 @@ COUCHBASE_PASSWORD=password
COUCHBASE_BUCKET_NAME=Embeddings
COUCHBASE_SCOPE_NAME=_default
# Hologres configuration
# access_key_id is used as the PG username, access_key_secret is used as the PG password
HOLOGRES_HOST=
HOLOGRES_PORT=80
HOLOGRES_DATABASE=
HOLOGRES_ACCESS_KEY_ID=
HOLOGRES_ACCESS_KEY_SECRET=
HOLOGRES_SCHEMA=public
HOLOGRES_TOKENIZER=jieba
HOLOGRES_DISTANCE_METHOD=Cosine
HOLOGRES_BASE_QUANTIZATION_TYPE=rabitq
HOLOGRES_MAX_DEGREE=64
HOLOGRES_EF_CONSTRUCTION=400
# Milvus configuration
MILVUS_URI=http://127.0.0.1:19530
MILVUS_TOKEN=

View File

@@ -96,6 +96,7 @@ ignore_imports =
dify_graph.nodes.tool.tool_node -> core.callback_handler.workflow_tool_callback_handler
dify_graph.nodes.tool.tool_node -> core.tools.tool_engine
dify_graph.nodes.tool.tool_node -> core.tools.tool_manager
dify_graph.nodes.knowledge_retrieval.knowledge_retrieval_node -> core.app.app_config.entities
dify_graph.nodes.parameter_extractor.parameter_extractor_node -> core.prompt.advanced_prompt_transform
dify_graph.nodes.parameter_extractor.parameter_extractor_node -> core.prompt.simple_prompt_transform
dify_graph.nodes.parameter_extractor.parameter_extractor_node -> dify_graph.model_runtime.model_providers.__base.large_language_model
@@ -103,6 +104,7 @@ ignore_imports =
dify_graph.nodes.parameter_extractor.parameter_extractor_node -> core.model_manager
dify_graph.nodes.question_classifier.question_classifier_node -> core.model_manager
dify_graph.nodes.tool.tool_node -> core.tools.utils.message_transformer
dify_graph.nodes.llm.node -> core.helper.code_executor
dify_graph.nodes.llm.node -> core.llm_generator.output_parser.errors
dify_graph.nodes.llm.node -> core.llm_generator.output_parser.structured_output
dify_graph.nodes.llm.node -> core.model_manager
@@ -114,6 +116,7 @@ ignore_imports =
dify_graph.nodes.parameter_extractor.parameter_extractor_node -> core.prompt.utils.prompt_message_util
dify_graph.nodes.question_classifier.entities -> core.prompt.entities.advanced_prompt_entities
dify_graph.nodes.question_classifier.question_classifier_node -> core.prompt.utils.prompt_message_util
dify_graph.nodes.knowledge_index.entities -> core.rag.retrieval.retrieval_methods
dify_graph.nodes.llm.node -> models.dataset
dify_graph.nodes.llm.file_saver -> core.tools.signature
dify_graph.nodes.llm.file_saver -> core.tools.tool_file_manager

View File

@@ -97,7 +97,7 @@ ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
# Download nltk data
RUN mkdir -p /usr/local/share/nltk_data \
&& NLTK_DATA=/usr/local/share/nltk_data python -c "import nltk; nltk.download('punkt'); nltk.download('averaged_perceptron_tagger'); nltk.download('stopwords')" \
&& NLTK_DATA=/usr/local/share/nltk_data python -c "import nltk; from unstructured.nlp.tokenize import download_nltk_packages; nltk.download('punkt'); nltk.download('averaged_perceptron_tagger'); nltk.download('stopwords'); download_nltk_packages()" \
&& chmod -R 755 /usr/local/share/nltk_data
ENV TIKTOKEN_CACHE_DIR=/app/api/.tiktoken_cache

View File

@@ -1,45 +1,16 @@
import logging
import time
from flask import request
from opentelemetry.trace import get_current_span
from opentelemetry.trace.span import INVALID_SPAN_ID, INVALID_TRACE_ID
from configs import dify_config
from contexts.wrapper import RecyclableContextVar
from controllers.console.error import UnauthorizedAndForceLogout
from core.logging.context import init_request_context
from dify_app import DifyApp
from services.enterprise.enterprise_service import EnterpriseService
from services.feature_service import LicenseStatus
logger = logging.getLogger(__name__)
# Console bootstrap APIs exempt from license check.
# Defined at module level to avoid per-request tuple construction.
# - system-features: license status for expiry UI (GlobalPublicStoreProvider)
# - setup: install/setup status check (AppInitializer)
# - init: init password validation for fresh install (InitPasswordPopup)
# - login: auto-login after setup completion (InstallForm)
# - features: billing/plan features (ProviderContextProvider)
# - account/profile: login check + user profile (AppContextProvider, useIsLogin)
# - workspaces/current: workspace + model providers (AppContextProvider)
# - version: version check (AppContextProvider)
# - activate/check: invitation link validation (signin page)
# Without these exemptions, the signin page triggers location.reload()
# on unauthorized_and_force_logout, causing an infinite loop.
_CONSOLE_EXEMPT_PREFIXES = (
"/console/api/system-features",
"/console/api/setup",
"/console/api/init",
"/console/api/login",
"/console/api/features",
"/console/api/account/profile",
"/console/api/workspaces/current",
"/console/api/version",
"/console/api/activate/check",
)
# ----------------------------
# Application Factory Function
@@ -60,39 +31,6 @@ def create_flask_app_with_configs() -> DifyApp:
init_request_context()
RecyclableContextVar.increment_thread_recycles()
# Enterprise license validation for API endpoints (both console and webapp)
# When license expires, block all API access except bootstrap endpoints needed
# for the frontend to load the license expiration page without infinite reloads.
if dify_config.ENTERPRISE_ENABLED:
is_console_api = request.path.startswith("/console/api/")
is_webapp_api = request.path.startswith("/api/")
if is_console_api or is_webapp_api:
if is_console_api:
is_exempt = any(request.path.startswith(p) for p in _CONSOLE_EXEMPT_PREFIXES)
else: # webapp API
is_exempt = request.path.startswith("/api/system-features")
if not is_exempt:
try:
# Check license status (cached — see EnterpriseService for TTL details)
license_status = EnterpriseService.get_cached_license_status()
if license_status in (LicenseStatus.INACTIVE, LicenseStatus.EXPIRED, LicenseStatus.LOST):
raise UnauthorizedAndForceLogout(
f"Enterprise license is {license_status}. Please contact your administrator."
)
if license_status is None:
raise UnauthorizedAndForceLogout(
"Unable to verify enterprise license. Please contact your administrator."
)
except UnauthorizedAndForceLogout:
raise
except Exception:
logger.exception("Failed to check enterprise license status")
raise UnauthorizedAndForceLogout(
"Unable to verify enterprise license. Please contact your administrator."
)
# add after request hook for injecting trace headers from OpenTelemetry span context
# Only adds headers when OTEL is enabled and has valid context
@dify_app.after_request

View File

@@ -160,7 +160,6 @@ def migrate_knowledge_vector_database():
}
lower_collection_vector_types = {
VectorType.ANALYTICDB,
VectorType.HOLOGRES,
VectorType.CHROMA,
VectorType.MYSCALE,
VectorType.PGVECTO_RS,

View File

@@ -26,7 +26,6 @@ from .vdb.chroma_config import ChromaConfig
from .vdb.clickzetta_config import ClickzettaConfig
from .vdb.couchbase_config import CouchbaseConfig
from .vdb.elasticsearch_config import ElasticsearchConfig
from .vdb.hologres_config import HologresConfig
from .vdb.huawei_cloud_config import HuaweiCloudConfig
from .vdb.iris_config import IrisVectorConfig
from .vdb.lindorm_config import LindormConfig
@@ -348,7 +347,6 @@ class MiddlewareConfig(
AnalyticdbConfig,
ChromaConfig,
ClickzettaConfig,
HologresConfig,
HuaweiCloudConfig,
IrisVectorConfig,
MilvusConfig,

View File

@@ -1,68 +0,0 @@
from holo_search_sdk.types import BaseQuantizationType, DistanceType, TokenizerType
from pydantic import Field
from pydantic_settings import BaseSettings
class HologresConfig(BaseSettings):
"""
Configuration settings for Hologres vector database.
Hologres is compatible with PostgreSQL protocol.
access_key_id is used as the PostgreSQL username,
and access_key_secret is used as the PostgreSQL password.
"""
HOLOGRES_HOST: str | None = Field(
description="Hostname or IP address of the Hologres instance.",
default=None,
)
HOLOGRES_PORT: int = Field(
description="Port number for connecting to the Hologres instance.",
default=80,
)
HOLOGRES_DATABASE: str | None = Field(
description="Name of the Hologres database to connect to.",
default=None,
)
HOLOGRES_ACCESS_KEY_ID: str | None = Field(
description="Alibaba Cloud AccessKey ID, also used as the PostgreSQL username.",
default=None,
)
HOLOGRES_ACCESS_KEY_SECRET: str | None = Field(
description="Alibaba Cloud AccessKey Secret, also used as the PostgreSQL password.",
default=None,
)
HOLOGRES_SCHEMA: str = Field(
description="Schema name in the Hologres database.",
default="public",
)
HOLOGRES_TOKENIZER: TokenizerType = Field(
description="Tokenizer for full-text search index (e.g., 'jieba', 'ik', 'standard', 'simple').",
default="jieba",
)
HOLOGRES_DISTANCE_METHOD: DistanceType = Field(
description="Distance method for vector index (e.g., 'Cosine', 'Euclidean', 'InnerProduct').",
default="Cosine",
)
HOLOGRES_BASE_QUANTIZATION_TYPE: BaseQuantizationType = Field(
description="Base quantization type for vector index (e.g., 'rabitq', 'sq8', 'fp16', 'fp32').",
default="rabitq",
)
HOLOGRES_MAX_DEGREE: int = Field(
description="Max degree (M) parameter for HNSW vector index.",
default=64,
)
HOLOGRES_EF_CONSTRUCTION: int = Field(
description="ef_construction parameter for HNSW vector index.",
default=400,
)

View File

@@ -25,8 +25,7 @@ from controllers.console.wraps import (
)
from core.ops.ops_trace_manager import OpsTraceManager
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from core.trigger.constants import TRIGGER_NODE_TYPES
from dify_graph.enums import WorkflowExecutionStatus
from dify_graph.enums import NodeType, WorkflowExecutionStatus
from dify_graph.file import helpers as file_helpers
from extensions.ext_database import db
from libs.login import current_account_with_tenant, login_required
@@ -509,7 +508,11 @@ class AppListApi(Resource):
.scalars()
.all()
)
trigger_node_types = TRIGGER_NODE_TYPES
trigger_node_types = {
NodeType.TRIGGER_WEBHOOK,
NodeType.TRIGGER_SCHEDULE,
NodeType.TRIGGER_PLUGIN,
}
for workflow in draft_workflows:
node_id = None
try:

View File

@@ -22,7 +22,6 @@ from core.app.apps.workflow.app_generator import SKIP_PREPARE_USER_INPUTS_KEY
from core.app.entities.app_invoke_entities import InvokeFrom
from core.helper.trace_id_helper import get_external_trace_id
from core.plugin.impl.exc import PluginInvokeError
from core.trigger.constants import TRIGGER_SCHEDULE_NODE_TYPE
from core.trigger.debug.event_selectors import (
TriggerDebugEvent,
TriggerDebugEventPoller,
@@ -1210,7 +1209,7 @@ class DraftWorkflowTriggerNodeApi(Resource):
node_type: NodeType = draft_workflow.get_node_type_from_node_config(node_config)
event: TriggerDebugEvent | None = None
# for schedule trigger, when run single node, just execute directly
if node_type == TRIGGER_SCHEDULE_NODE_TYPE:
if node_type == NodeType.TRIGGER_SCHEDULE:
event = TriggerDebugEvent(
workflow_args={},
node_id=node_id,

View File

@@ -263,7 +263,6 @@ def _get_retrieval_methods_by_vector_type(vector_type: str | None, is_mock: bool
VectorType.BAIDU,
VectorType.ALIBABACLOUD_MYSQL,
VectorType.IRIS,
VectorType.HOLOGRES,
}
semantic_methods = {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}

View File

@@ -3,7 +3,7 @@ import time
from collections.abc import Callable
from enum import StrEnum, auto
from functools import wraps
from typing import Concatenate, ParamSpec, TypeVar, cast, overload
from typing import Concatenate, ParamSpec, TypeVar, cast
from flask import current_app, request
from flask_login import user_logged_in
@@ -44,22 +44,10 @@ class FetchUserArg(BaseModel):
required: bool = False
@overload
def validate_app_token(view: Callable[P, R]) -> Callable[P, R]: ...
@overload
def validate_app_token(
view: None = None, *, fetch_user_arg: FetchUserArg | None = None
) -> Callable[[Callable[P, R]], Callable[P, R]]: ...
def validate_app_token(
view: Callable[P, R] | None = None, *, fetch_user_arg: FetchUserArg | None = None
) -> Callable[P, R] | Callable[[Callable[P, R]], Callable[P, R]]:
def decorator(view_func: Callable[P, R]) -> Callable[P, R]:
def validate_app_token(view: Callable[P, R] | None = None, *, fetch_user_arg: FetchUserArg | None = None):
def decorator(view_func: Callable[P, R]):
@wraps(view_func)
def decorated_view(*args: P.args, **kwargs: P.kwargs) -> R:
def decorated_view(*args: P.args, **kwargs: P.kwargs):
api_token = validate_and_get_api_token("app")
app_model = db.session.query(App).where(App.id == api_token.app_id).first()
@@ -225,20 +213,10 @@ def cloud_edition_billing_rate_limit_check(resource: str, api_token_type: str):
return interceptor
@overload
def validate_dataset_token(view: Callable[Concatenate[T, P], R]) -> Callable[P, R]: ...
@overload
def validate_dataset_token(view: None = None) -> Callable[[Callable[Concatenate[T, P], R]], Callable[P, R]]: ...
def validate_dataset_token(
view: Callable[Concatenate[T, P], R] | None = None,
) -> Callable[P, R] | Callable[[Callable[Concatenate[T, P], R]], Callable[P, R]]:
def decorator(view_func: Callable[Concatenate[T, P], R]) -> Callable[P, R]:
@wraps(view_func)
def decorated(*args: P.args, **kwargs: P.kwargs) -> R:
def validate_dataset_token(view: Callable[Concatenate[T, P], R] | None = None):
def decorator(view: Callable[Concatenate[T, P], R]):
@wraps(view)
def decorated(*args: P.args, **kwargs: P.kwargs):
api_token = validate_and_get_api_token("dataset")
# get url path dataset_id from positional args or kwargs
@@ -309,7 +287,7 @@ def validate_dataset_token(
raise Unauthorized("Tenant owner account does not exist.")
else:
raise Unauthorized("Tenant does not exist.")
return view_func(api_token.tenant_id, *args, **kwargs) # type: ignore[arg-type]
return view(api_token.tenant_id, *args, **kwargs)
return decorated

View File

@@ -69,7 +69,7 @@ from dify_graph.entities.pause_reason import HumanInputRequired
from dify_graph.enums import WorkflowExecutionStatus
from dify_graph.model_runtime.entities.llm_entities import LLMUsage
from dify_graph.model_runtime.utils.encoders import jsonable_encoder
from dify_graph.nodes import BuiltinNodeTypes
from dify_graph.nodes import NodeType
from dify_graph.repositories.draft_variable_repository import DraftVariableSaverFactory
from dify_graph.runtime import GraphRuntimeState
from dify_graph.system_variable import SystemVariable
@@ -357,7 +357,7 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport):
) -> Generator[StreamResponse, None, None]:
"""Handle node succeeded events."""
# Record files if it's an answer node or end node
if event.node_type in [BuiltinNodeTypes.ANSWER, BuiltinNodeTypes.END, BuiltinNodeTypes.LLM]:
if event.node_type in [NodeType.ANSWER, NodeType.END, NodeType.LLM]:
self._recorded_files.extend(
self._workflow_response_converter.fetch_files_from_node_outputs(event.outputs or {})
)

View File

@@ -48,13 +48,12 @@ from core.app.entities.task_entities import (
from core.plugin.impl.datasource import PluginDatasourceManager
from core.tools.entities.tool_entities import ToolProviderType
from core.tools.tool_manager import ToolManager
from core.trigger.constants import TRIGGER_PLUGIN_NODE_TYPE
from core.trigger.trigger_manager import TriggerManager
from core.workflow.workflow_entry import WorkflowEntry
from dify_graph.entities.pause_reason import HumanInputRequired
from dify_graph.entities.workflow_start_reason import WorkflowStartReason
from dify_graph.enums import (
BuiltinNodeTypes,
NodeType,
SystemVariableKey,
WorkflowExecutionStatus,
WorkflowNodeExecutionMetadataKey,
@@ -443,7 +442,7 @@ class WorkflowResponseConverter:
event: QueueNodeStartedEvent,
task_id: str,
) -> NodeStartStreamResponse | None:
if event.node_type in {BuiltinNodeTypes.ITERATION, BuiltinNodeTypes.LOOP}:
if event.node_type in {NodeType.ITERATION, NodeType.LOOP}:
return None
run_id = self._ensure_workflow_run_id()
snapshot = self._store_snapshot(event)
@@ -465,13 +464,13 @@ class WorkflowResponseConverter:
)
try:
if event.node_type == BuiltinNodeTypes.TOOL:
if event.node_type == NodeType.TOOL:
response.data.extras["icon"] = ToolManager.get_tool_icon(
tenant_id=self._application_generate_entity.app_config.tenant_id,
provider_type=ToolProviderType(event.provider_type),
provider_id=event.provider_id,
)
elif event.node_type == BuiltinNodeTypes.DATASOURCE:
elif event.node_type == NodeType.DATASOURCE:
manager = PluginDatasourceManager()
provider_entity = manager.fetch_datasource_provider(
self._application_generate_entity.app_config.tenant_id,
@@ -480,7 +479,7 @@ class WorkflowResponseConverter:
response.data.extras["icon"] = provider_entity.declaration.identity.generate_datasource_icon_url(
self._application_generate_entity.app_config.tenant_id
)
elif event.node_type == TRIGGER_PLUGIN_NODE_TYPE:
elif event.node_type == NodeType.TRIGGER_PLUGIN:
response.data.extras["icon"] = TriggerManager.get_trigger_plugin_icon(
self._application_generate_entity.app_config.tenant_id,
event.provider_id,
@@ -497,7 +496,7 @@ class WorkflowResponseConverter:
event: QueueNodeSucceededEvent | QueueNodeFailedEvent | QueueNodeExceptionEvent,
task_id: str,
) -> NodeFinishStreamResponse | None:
if event.node_type in {BuiltinNodeTypes.ITERATION, BuiltinNodeTypes.LOOP}:
if event.node_type in {NodeType.ITERATION, NodeType.LOOP}:
return None
run_id = self._ensure_workflow_run_id()
snapshot = self._pop_snapshot(event.node_execution_id)
@@ -555,7 +554,7 @@ class WorkflowResponseConverter:
event: QueueNodeRetryEvent,
task_id: str,
) -> NodeRetryStreamResponse | None:
if event.node_type in {BuiltinNodeTypes.ITERATION, BuiltinNodeTypes.LOOP}:
if event.node_type in {NodeType.ITERATION, NodeType.LOOP}:
return None
run_id = self._ensure_workflow_run_id()
@@ -613,7 +612,7 @@ class WorkflowResponseConverter:
data=IterationNodeStartStreamResponse.Data(
id=event.node_id,
node_id=event.node_id,
node_type=event.node_type,
node_type=event.node_type.value,
title=event.node_title,
created_at=int(time.time()),
extras={},
@@ -636,7 +635,7 @@ class WorkflowResponseConverter:
data=IterationNodeNextStreamResponse.Data(
id=event.node_id,
node_id=event.node_id,
node_type=event.node_type,
node_type=event.node_type.value,
title=event.node_title,
index=event.index,
created_at=int(time.time()),
@@ -663,7 +662,7 @@ class WorkflowResponseConverter:
data=IterationNodeCompletedStreamResponse.Data(
id=event.node_id,
node_id=event.node_id,
node_type=event.node_type,
node_type=event.node_type.value,
title=event.node_title,
outputs=new_outputs,
outputs_truncated=outputs_truncated,
@@ -693,7 +692,7 @@ class WorkflowResponseConverter:
data=LoopNodeStartStreamResponse.Data(
id=event.node_id,
node_id=event.node_id,
node_type=event.node_type,
node_type=event.node_type.value,
title=event.node_title,
created_at=int(time.time()),
extras={},
@@ -716,7 +715,7 @@ class WorkflowResponseConverter:
data=LoopNodeNextStreamResponse.Data(
id=event.node_id,
node_id=event.node_id,
node_type=event.node_type,
node_type=event.node_type.value,
title=event.node_title,
index=event.index,
# The `pre_loop_output` field is not utilized by the frontend.
@@ -745,7 +744,7 @@ class WorkflowResponseConverter:
data=LoopNodeCompletedStreamResponse.Data(
id=event.node_id,
node_id=event.node_id,
node_type=event.node_type,
node_type=event.node_type.value,
title=event.node_title,
outputs=new_outputs,
outputs_truncated=outputs_truncated,

View File

@@ -12,7 +12,7 @@ from core.app.entities.app_invoke_entities import (
build_dify_run_context,
)
from core.app.workflow.layers.persistence import PersistenceWorkflowInfo, WorkflowPersistenceLayer
from core.workflow.node_factory import DifyNodeFactory, get_default_root_node_id
from core.workflow.node_factory import DifyNodeFactory
from core.workflow.workflow_entry import WorkflowEntry
from dify_graph.entities.graph_init_params import GraphInitParams
from dify_graph.enums import WorkflowType
@@ -274,8 +274,6 @@ class PipelineRunner(WorkflowBasedAppRunner):
graph_init_params=graph_init_params,
graph_runtime_state=graph_runtime_state,
)
if start_node_id is None:
start_node_id = get_default_root_node_id(graph_config)
graph = Graph.init(graph_config=graph_config, node_factory=node_factory, root_node_id=start_node_id)
if not graph:

View File

@@ -32,8 +32,8 @@ from core.app.entities.queue_entities import (
QueueWorkflowStartedEvent,
QueueWorkflowSucceededEvent,
)
from core.rag.entities.citation_metadata import RetrievalSourceMetadata
from core.workflow.node_factory import DifyNodeFactory, get_default_root_node_id, resolve_workflow_node_class
from core.workflow.node_factory import DifyNodeFactory
from core.workflow.node_resolution import resolve_workflow_node_class
from core.workflow.workflow_entry import WorkflowEntry
from dify_graph.entities import GraphInitParams
from dify_graph.entities.graph_config import NodeConfigDictAdapter
@@ -140,9 +140,6 @@ class WorkflowBasedAppRunner:
graph_runtime_state=graph_runtime_state,
)
if root_node_id is None:
root_node_id = get_default_root_node_id(graph_config)
# init graph
graph = Graph.init(graph_config=graph_config, node_factory=node_factory, root_node_id=root_node_id)
@@ -508,9 +505,7 @@ class WorkflowBasedAppRunner:
elif isinstance(event, NodeRunRetrieverResourceEvent):
self._publish_event(
QueueRetrieverResourcesEvent(
retriever_resources=[
RetrievalSourceMetadata.model_validate(resource) for resource in event.retriever_resources
],
retriever_resources=event.retriever_resources,
in_iteration_id=event.in_iteration_id,
in_loop_id=event.in_loop_id,
)

View File

@@ -9,8 +9,9 @@ from core.app.entities.agent_strategy import AgentStrategyInfo
from core.rag.entities.citation_metadata import RetrievalSourceMetadata
from dify_graph.entities.pause_reason import PauseReason
from dify_graph.entities.workflow_start_reason import WorkflowStartReason
from dify_graph.enums import NodeType, WorkflowNodeExecutionMetadataKey
from dify_graph.enums import WorkflowNodeExecutionMetadataKey
from dify_graph.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk
from dify_graph.nodes import NodeType
class QueueEvent(StrEnum):

View File

@@ -2,7 +2,7 @@ import logging
from dify_graph.constants import CONVERSATION_VARIABLE_NODE_ID
from dify_graph.conversation_variable_updater import ConversationVariableUpdater
from dify_graph.enums import BuiltinNodeTypes
from dify_graph.enums import NodeType
from dify_graph.graph_engine.layers.base import GraphEngineLayer
from dify_graph.graph_events import GraphEngineEvent, NodeRunSucceededEvent
from dify_graph.nodes.variable_assigner.common import helpers as common_helpers
@@ -22,7 +22,7 @@ class ConversationVariablePersistenceLayer(GraphEngineLayer):
def on_event(self, event: GraphEngineEvent) -> None:
if not isinstance(event, NodeRunSucceededEvent):
return
if event.node_type != BuiltinNodeTypes.VARIABLE_ASSIGNER:
if event.node_type != NodeType.VARIABLE_ASSIGNER:
return
if self.graph_runtime_state is None:
return

View File

@@ -12,7 +12,7 @@ from typing_extensions import override
from core.app.llm import deduct_llm_quota, ensure_llm_quota_available
from core.errors.error import QuotaExceededError
from core.model_manager import ModelInstance
from dify_graph.enums import BuiltinNodeTypes
from dify_graph.enums import NodeType
from dify_graph.graph_engine.entities.commands import AbortCommand, CommandType
from dify_graph.graph_engine.layers.base import GraphEngineLayer
from dify_graph.graph_events import GraphEngineEvent, GraphNodeEventBase
@@ -113,11 +113,11 @@ class LLMQuotaLayer(GraphEngineLayer):
def _extract_model_instance(node: Node) -> ModelInstance | None:
try:
match node.node_type:
case BuiltinNodeTypes.LLM:
case NodeType.LLM:
return cast("LLMNode", node).model_instance
case BuiltinNodeTypes.PARAMETER_EXTRACTOR:
case NodeType.PARAMETER_EXTRACTOR:
return cast("ParameterExtractorNode", node).model_instance
case BuiltinNodeTypes.QUESTION_CLASSIFIER:
case NodeType.QUESTION_CLASSIFIER:
return cast("QuestionClassifierNode", node).model_instance
case _:
return None

View File

@@ -16,7 +16,7 @@ from opentelemetry.trace import Span, SpanKind, Tracer, get_tracer, set_span_in_
from typing_extensions import override
from configs import dify_config
from dify_graph.enums import BuiltinNodeTypes, NodeType
from dify_graph.enums import NodeType
from dify_graph.graph_engine.layers.base import GraphEngineLayer
from dify_graph.graph_events import GraphNodeEventBase
from dify_graph.nodes.base.node import Node
@@ -74,13 +74,16 @@ class ObservabilityLayer(GraphEngineLayer):
def _build_parser_registry(self) -> None:
"""Initialize parser registry for node types."""
self._parsers = {
BuiltinNodeTypes.TOOL: ToolNodeOTelParser(),
BuiltinNodeTypes.LLM: LLMNodeOTelParser(),
BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL: RetrievalNodeOTelParser(),
NodeType.TOOL: ToolNodeOTelParser(),
NodeType.LLM: LLMNodeOTelParser(),
NodeType.KNOWLEDGE_RETRIEVAL: RetrievalNodeOTelParser(),
}
def _get_parser(self, node: Node) -> NodeOTelParser:
return self._parsers.get(node.node_type, self._default_parser)
node_type = getattr(node, "node_type", None)
if isinstance(node_type, NodeType):
return self._parsers.get(node_type, self._default_parser)
return self._default_parser
@override
def on_graph_start(self) -> None:

View File

@@ -24,12 +24,12 @@ from core.datasource.utils.message_transformer import DatasourceFileMessageTrans
from core.datasource.website_crawl.website_crawl_provider import WebsiteCrawlDatasourcePluginProviderController
from core.db.session_factory import session_factory
from core.plugin.impl.datasource import PluginDatasourceManager
from core.workflow.nodes.datasource.entities import DatasourceParameter, OnlineDriveDownloadFileParam
from dify_graph.entities.workflow_node_execution import WorkflowNodeExecutionStatus
from dify_graph.enums import WorkflowNodeExecutionMetadataKey
from dify_graph.file import File
from dify_graph.file.enums import FileTransferMethod, FileType
from dify_graph.node_events import NodeRunResult, StreamChunkEvent, StreamCompletedEvent
from dify_graph.repositories.datasource_manager_protocol import DatasourceParameter, OnlineDriveDownloadFileParam
from factories import file_factory
from models.model import UploadFile
from models.tools import ToolFile

View File

@@ -58,7 +58,7 @@ from core.ops.entities.trace_entity import (
)
from core.repositories import DifyCoreRepositoryFactory
from dify_graph.entities import WorkflowNodeExecution
from dify_graph.enums import BuiltinNodeTypes, WorkflowNodeExecutionMetadataKey
from dify_graph.enums import NodeType, WorkflowNodeExecutionMetadataKey
from extensions.ext_database import db
from models import WorkflowNodeExecutionTriggeredFrom
@@ -302,11 +302,11 @@ class AliyunDataTrace(BaseTraceInstance):
self, node_execution: WorkflowNodeExecution, trace_info: WorkflowTraceInfo, trace_metadata: TraceMetadata
):
try:
if node_execution.node_type == BuiltinNodeTypes.LLM:
if node_execution.node_type == NodeType.LLM:
node_span = self.build_workflow_llm_span(trace_info, node_execution, trace_metadata)
elif node_execution.node_type == BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL:
elif node_execution.node_type == NodeType.KNOWLEDGE_RETRIEVAL:
node_span = self.build_workflow_retrieval_span(trace_info, node_execution, trace_metadata)
elif node_execution.node_type == BuiltinNodeTypes.TOOL:
elif node_execution.node_type == NodeType.TOOL:
node_span = self.build_workflow_tool_span(trace_info, node_execution, trace_metadata)
else:
node_span = self.build_workflow_task_span(trace_info, node_execution, trace_metadata)

View File

@@ -155,8 +155,8 @@ def wrap_span_metadata(metadata, **kwargs):
return metadata
# Mapping from built-in node type strings to OpenInference span kinds.
# Node types not listed here default to CHAIN.
# Mapping from NodeType string values to OpenInference span kinds.
# NodeType values not listed here default to CHAIN.
_NODE_TYPE_TO_SPAN_KIND: dict[str, OpenInferenceSpanKindValues] = {
"llm": OpenInferenceSpanKindValues.LLM,
"knowledge-retrieval": OpenInferenceSpanKindValues.RETRIEVER,
@@ -168,7 +168,7 @@ _NODE_TYPE_TO_SPAN_KIND: dict[str, OpenInferenceSpanKindValues] = {
def _get_node_span_kind(node_type: str) -> OpenInferenceSpanKindValues:
"""Return the OpenInference span kind for a given workflow node type.
Covers every built-in node type string. Nodes that do not have a
Covers every ``NodeType`` enum value. Nodes that do not have a
specialised span kind (e.g. ``start``, ``end``, ``if-else``,
``code``, ``loop``, ``iteration``, etc.) are mapped to ``CHAIN``.
"""

View File

@@ -28,7 +28,7 @@ from core.ops.langfuse_trace.entities.langfuse_trace_entity import (
)
from core.ops.utils import filter_none_values
from core.repositories import DifyCoreRepositoryFactory
from dify_graph.enums import BuiltinNodeTypes
from dify_graph.enums import NodeType
from extensions.ext_database import db
from models import EndUser, WorkflowNodeExecutionTriggeredFrom
from models.enums import MessageStatus
@@ -141,7 +141,7 @@ class LangFuseDataTrace(BaseTraceInstance):
node_name = node_execution.title
node_type = node_execution.node_type
status = node_execution.status
if node_type == BuiltinNodeTypes.LLM:
if node_type == NodeType.LLM:
inputs = node_execution.process_data.get("prompts", {}) if node_execution.process_data else {}
else:
inputs = node_execution.inputs or {}

View File

@@ -28,7 +28,7 @@ from core.ops.langsmith_trace.entities.langsmith_trace_entity import (
)
from core.ops.utils import filter_none_values, generate_dotted_order
from core.repositories import DifyCoreRepositoryFactory
from dify_graph.enums import BuiltinNodeTypes, WorkflowNodeExecutionMetadataKey
from dify_graph.enums import NodeType, WorkflowNodeExecutionMetadataKey
from extensions.ext_database import db
from models import EndUser, MessageFile, WorkflowNodeExecutionTriggeredFrom
@@ -163,7 +163,7 @@ class LangSmithDataTrace(BaseTraceInstance):
node_name = node_execution.title
node_type = node_execution.node_type
status = node_execution.status
if node_type == BuiltinNodeTypes.LLM:
if node_type == NodeType.LLM:
inputs = node_execution.process_data.get("prompts", {}) if node_execution.process_data else {}
else:
inputs = node_execution.inputs or {}
@@ -197,7 +197,7 @@ class LangSmithDataTrace(BaseTraceInstance):
"ls_model_name": process_data.get("model_name", ""),
}
)
elif node_type == BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL:
elif node_type == NodeType.KNOWLEDGE_RETRIEVAL:
run_type = LangSmithRunType.retriever
else:
run_type = LangSmithRunType.tool

View File

@@ -23,7 +23,7 @@ from core.ops.entities.trace_entity import (
TraceTaskName,
WorkflowTraceInfo,
)
from dify_graph.enums import BuiltinNodeTypes
from dify_graph.enums import NodeType
from extensions.ext_database import db
from models import EndUser
from models.workflow import WorkflowNodeExecutionModel
@@ -145,10 +145,10 @@ class MLflowDataTrace(BaseTraceInstance):
"app_name": node.title,
}
if node.node_type in (BuiltinNodeTypes.LLM, BuiltinNodeTypes.QUESTION_CLASSIFIER):
if node.node_type in (NodeType.LLM, NodeType.QUESTION_CLASSIFIER):
inputs, llm_attributes = self._parse_llm_inputs_and_attributes(node)
attributes.update(llm_attributes)
elif node.node_type == BuiltinNodeTypes.HTTP_REQUEST:
elif node.node_type == NodeType.HTTP_REQUEST:
inputs = node.process_data # contains request URL
if not inputs:
@@ -180,9 +180,9 @@ class MLflowDataTrace(BaseTraceInstance):
# End node span
finished_at = node.created_at + timedelta(seconds=node.elapsed_time)
outputs = json.loads(node.outputs) if node.outputs else {}
if node.node_type == BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL:
if node.node_type == NodeType.KNOWLEDGE_RETRIEVAL:
outputs = self._parse_knowledge_retrieval_outputs(outputs)
elif node.node_type == BuiltinNodeTypes.LLM:
elif node.node_type == NodeType.LLM:
outputs = outputs.get("text", outputs)
node_span.end(
outputs=outputs,
@@ -471,13 +471,13 @@ class MLflowDataTrace(BaseTraceInstance):
def _get_node_span_type(self, node_type: str) -> str:
"""Map Dify node types to MLflow span types"""
node_type_mapping = {
BuiltinNodeTypes.LLM: SpanType.LLM,
BuiltinNodeTypes.QUESTION_CLASSIFIER: SpanType.LLM,
BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL: SpanType.RETRIEVER,
BuiltinNodeTypes.TOOL: SpanType.TOOL,
BuiltinNodeTypes.CODE: SpanType.TOOL,
BuiltinNodeTypes.HTTP_REQUEST: SpanType.TOOL,
BuiltinNodeTypes.AGENT: SpanType.AGENT,
NodeType.LLM: SpanType.LLM,
NodeType.QUESTION_CLASSIFIER: SpanType.LLM,
NodeType.KNOWLEDGE_RETRIEVAL: SpanType.RETRIEVER,
NodeType.TOOL: SpanType.TOOL,
NodeType.CODE: SpanType.TOOL,
NodeType.HTTP_REQUEST: SpanType.TOOL,
NodeType.AGENT: SpanType.AGENT,
}
return node_type_mapping.get(node_type, "CHAIN") # type: ignore[arg-type,call-overload]

View File

@@ -23,7 +23,7 @@ from core.ops.entities.trace_entity import (
WorkflowTraceInfo,
)
from core.repositories import DifyCoreRepositoryFactory
from dify_graph.enums import BuiltinNodeTypes, WorkflowNodeExecutionMetadataKey
from dify_graph.enums import NodeType, WorkflowNodeExecutionMetadataKey
from extensions.ext_database import db
from models import EndUser, MessageFile, WorkflowNodeExecutionTriggeredFrom
@@ -187,7 +187,7 @@ class OpikDataTrace(BaseTraceInstance):
node_name = node_execution.title
node_type = node_execution.node_type
status = node_execution.status
if node_type == BuiltinNodeTypes.LLM:
if node_type == NodeType.LLM:
inputs = node_execution.process_data.get("prompts", {}) if node_execution.process_data else {}
else:
inputs = node_execution.inputs or {}

View File

@@ -27,7 +27,7 @@ from core.repositories import SQLAlchemyWorkflowNodeExecutionRepository
from dify_graph.entities.workflow_node_execution import (
WorkflowNodeExecution,
)
from dify_graph.nodes import BuiltinNodeTypes
from dify_graph.nodes import NodeType
from extensions.ext_database import db
from models import Account, App, TenantAccountJoin, WorkflowNodeExecutionTriggeredFrom
@@ -179,7 +179,7 @@ class TencentDataTrace(BaseTraceInstance):
if node_span:
self.trace_client.add_span(node_span)
if node_execution.node_type == BuiltinNodeTypes.LLM:
if node_execution.node_type == NodeType.LLM:
self._record_llm_metrics(node_execution)
except Exception:
logger.exception("[Tencent APM] Failed to process node execution: %s", node_execution.id)
@@ -192,15 +192,15 @@ class TencentDataTrace(BaseTraceInstance):
) -> SpanData | None:
"""Build span for different node types"""
try:
if node_execution.node_type == BuiltinNodeTypes.LLM:
if node_execution.node_type == NodeType.LLM:
return TencentSpanBuilder.build_workflow_llm_span(
trace_id, workflow_span_id, trace_info, node_execution
)
elif node_execution.node_type == BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL:
elif node_execution.node_type == NodeType.KNOWLEDGE_RETRIEVAL:
return TencentSpanBuilder.build_workflow_retrieval_span(
trace_id, workflow_span_id, trace_info, node_execution
)
elif node_execution.node_type == BuiltinNodeTypes.TOOL:
elif node_execution.node_type == NodeType.TOOL:
return TencentSpanBuilder.build_workflow_tool_span(
trace_id, workflow_span_id, trace_info, node_execution
)

View File

@@ -31,7 +31,7 @@ from core.ops.entities.trace_entity import (
)
from core.ops.weave_trace.entities.weave_trace_entity import WeaveTraceModel
from core.repositories import DifyCoreRepositoryFactory
from dify_graph.enums import BuiltinNodeTypes, WorkflowNodeExecutionMetadataKey
from dify_graph.enums import NodeType, WorkflowNodeExecutionMetadataKey
from extensions.ext_database import db
from models import EndUser, MessageFile, WorkflowNodeExecutionTriggeredFrom
@@ -175,7 +175,7 @@ class WeaveDataTrace(BaseTraceInstance):
node_name = node_execution.title
node_type = node_execution.node_type
status = node_execution.status
if node_type == BuiltinNodeTypes.LLM:
if node_type == NodeType.LLM:
inputs = node_execution.process_data.get("prompts", {}) if node_execution.process_data else {}
else:
inputs = node_execution.inputs or {}

View File

@@ -1,5 +1,5 @@
from core.plugin.backwards_invocation.base import BaseBackwardsInvocation
from dify_graph.enums import BuiltinNodeTypes
from dify_graph.enums import NodeType
from dify_graph.nodes.parameter_extractor.entities import (
ModelConfig as ParameterExtractorModelConfig,
)
@@ -52,7 +52,7 @@ class PluginNodeBackwardsInvocation(BaseBackwardsInvocation):
instruction=instruction, # instruct with variables are not supported
)
node_data_dict = node_data.model_dump()
node_data_dict["type"] = BuiltinNodeTypes.PARAMETER_EXTRACTOR
node_data_dict["type"] = NodeType.PARAMETER_EXTRACTOR
execution = workflow_service.run_free_workflow_node(
node_data_dict,
tenant_id=tenant_id,

View File

@@ -305,7 +305,9 @@ class ProviderManager:
available_models = provider_configurations.get_models(model_type=model_type, only_active=True)
if available_models:
available_model = available_models[0]
available_model = next(
(model for model in available_models if model.model == "gpt-4"), available_models[0]
)
default_model = TenantDefaultModel(
tenant_id=tenant_id,

View File

@@ -1,361 +0,0 @@
import json
import logging
import time
from typing import Any
import holo_search_sdk as holo # type: ignore
from holo_search_sdk.types import BaseQuantizationType, DistanceType, TokenizerType
from psycopg import sql as psql
from pydantic import BaseModel, model_validator
from configs import dify_config
from core.rag.datasource.vdb.vector_base import BaseVector
from core.rag.datasource.vdb.vector_factory import AbstractVectorFactory
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.embedding.embedding_base import Embeddings
from core.rag.models.document import Document
from extensions.ext_redis import redis_client
from models.dataset import Dataset
logger = logging.getLogger(__name__)
class HologresVectorConfig(BaseModel):
    """
    Connection settings for a Hologres vector database.

    Hologres authenticates with Alibaba Cloud credentials: ``access_key_id``
    is used as the PostgreSQL username and ``access_key_secret`` as the
    PostgreSQL password.
    """

    host: str
    port: int = 80
    database: str
    access_key_id: str
    access_key_secret: str
    schema_name: str = "public"
    tokenizer: TokenizerType = "jieba"
    distance_method: DistanceType = "Cosine"
    base_quantization_type: BaseQuantizationType = "rabitq"
    max_degree: int = 64
    ef_construction: int = 400

    @model_validator(mode="before")
    @classmethod
    def validate_config(cls, values: dict):
        """Reject configurations missing any mandatory connection field."""
        required_fields = {
            "host": "HOLOGRES_HOST",
            "database": "HOLOGRES_DATABASE",
            "access_key_id": "HOLOGRES_ACCESS_KEY_ID",
            "access_key_secret": "HOLOGRES_ACCESS_KEY_SECRET",
        }
        for field_name, env_name in required_fields.items():
            if not values.get(field_name):
                raise ValueError(f"config {env_name} is required")
        return values
class HologresVector(BaseVector):
    """
    Hologres vector storage implementation using holo-search-sdk.

    Supports semantic search (vector), full-text search, and hybrid search.
    Each collection is backed by a single table with columns
    (id, text, meta, embedding): ``meta`` holds the document metadata as
    JSONB and ``embedding`` a fixed-dimension float4[].
    """

    def __init__(self, collection_name: str, config: HologresVectorConfig):
        super().__init__(collection_name)
        self._config = config
        self._client = self._init_client(config)
        # One table per collection; lower-cased to match Hologres identifier folding.
        self.table_name = f"embedding_{collection_name}".lower()

    def _init_client(self, config: HologresVectorConfig):
        """Initialize and return a connected holo-search-sdk client."""
        client = holo.connect(
            host=config.host,
            port=config.port,
            database=config.database,
            access_key_id=config.access_key_id,
            access_key_secret=config.access_key_secret,
            schema=config.schema_name,
        )
        client.connect()
        return client

    def get_type(self) -> str:
        return VectorType.HOLOGRES

    def create(self, texts: list[Document], embeddings: list[list[float]], **kwargs):
        """Create collection table with vector and full-text indexes, then add texts.

        Fix: guard against an empty ``embeddings`` list, which previously
        raised IndexError when reading the vector dimension from it.
        """
        if not embeddings:
            return
        dimension = len(embeddings[0])
        self._create_collection(dimension)
        self.add_texts(texts, embeddings)

    def add_texts(self, documents: list[Document], embeddings: list[list[float]], **kwargs):
        """Add texts with embeddings to the collection using batch upsert.

        Returns the list of doc_ids that were written.
        """
        if not documents:
            return []
        pks: list[str] = []
        batch_size = 100
        for i in range(0, len(documents), batch_size):
            batch_docs = documents[i : i + batch_size]
            batch_embeddings = embeddings[i : i + batch_size]
            values = []
            column_names = ["id", "text", "meta", "embedding"]
            for j, doc in enumerate(batch_docs):
                doc_id = doc.metadata.get("doc_id", "") if doc.metadata else ""
                pks.append(doc_id)
                values.append(
                    [
                        doc_id,
                        doc.page_content,
                        json.dumps(doc.metadata or {}),
                        batch_embeddings[j],
                    ]
                )
            # Upsert on the primary key so re-indexing the same doc_id
            # overwrites the stored text/meta/embedding instead of failing.
            table = self._client.open_table(self.table_name)
            table.upsert_multi(
                index_column="id",
                values=values,
                column_names=column_names,
                update=True,
                update_columns=["text", "meta", "embedding"],
            )
        return pks

    def text_exists(self, id: str) -> bool:
        """Check if a text with the given doc_id exists in the collection."""
        if not self._client.check_table_exist(self.table_name):
            return False
        result = self._client.execute(
            psql.SQL("SELECT 1 FROM {} WHERE id = {} LIMIT 1").format(
                psql.Identifier(self.table_name), psql.Literal(id)
            ),
            fetch_result=True,
        )
        return bool(result)

    def get_ids_by_metadata_field(self, key: str, value: str) -> list[str] | None:
        """Get document IDs by metadata field key and value.

        Fix: check table existence first, consistent with the other query
        methods, so a missing collection yields ``None`` instead of a
        database error.
        """
        if not self._client.check_table_exist(self.table_name):
            return None
        result = self._client.execute(
            psql.SQL("SELECT id FROM {} WHERE meta->>{} = {}").format(
                psql.Identifier(self.table_name), psql.Literal(key), psql.Literal(value)
            ),
            fetch_result=True,
        )
        if result:
            return [row[0] for row in result]
        return None

    def delete_by_ids(self, ids: list[str]):
        """Delete documents by their doc_id list (no-op for empty input)."""
        if not ids:
            return
        if not self._client.check_table_exist(self.table_name):
            return
        self._client.execute(
            psql.SQL("DELETE FROM {} WHERE id IN ({})").format(
                psql.Identifier(self.table_name),
                psql.SQL(", ").join(psql.Literal(id) for id in ids),
            )
        )

    def delete_by_metadata_field(self, key: str, value: str):
        """Delete documents by metadata field key and value."""
        if not self._client.check_table_exist(self.table_name):
            return
        self._client.execute(
            psql.SQL("DELETE FROM {} WHERE meta->>{} = {}").format(
                psql.Identifier(self.table_name), psql.Literal(key), psql.Literal(value)
            )
        )

    def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]:
        """Search for documents by vector similarity.

        Recognized kwargs: ``top_k`` (default 4), ``score_threshold``
        (default 0.0), ``document_ids_filter`` (restricts results by the
        ``document_id`` metadata field).
        """
        if not self._client.check_table_exist(self.table_name):
            return []
        top_k = kwargs.get("top_k", 4)
        score_threshold = float(kwargs.get("score_threshold") or 0.0)
        table = self._client.open_table(self.table_name)
        query = (
            table.search_vector(
                vector=query_vector,
                column="embedding",
                distance_method=self._config.distance_method,
                output_name="distance",
            )
            .select(["id", "text", "meta"])
            .limit(top_k)
        )
        # Apply document_ids_filter if provided
        document_ids_filter = kwargs.get("document_ids_filter")
        if document_ids_filter:
            filter_sql = psql.SQL("meta->>'document_id' IN ({})").format(
                psql.SQL(", ").join(psql.Literal(id) for id in document_ids_filter)
            )
            query = query.where(filter_sql)
        results = query.fetchall()
        return self._process_vector_results(results, score_threshold)

    def _process_vector_results(self, results: list, score_threshold: float) -> list[Document]:
        """Process vector search results into Document objects."""
        docs = []
        for row in results:
            # row format: (distance, id, text, meta)
            # distance is first because search_vector() adds the computed column before selected columns
            distance = row[0]
            text = row[2]
            meta = row[3]
            if isinstance(meta, str):
                meta = json.loads(meta)
            # Convert distance to similarity score (consistent with pgvector)
            score = 1 - distance
            meta["score"] = score
            if score >= score_threshold:
                docs.append(Document(page_content=text, metadata=meta))
        return docs

    def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
        """Search for documents by full-text search.

        Recognized kwargs: ``top_k`` (default 4) and ``document_ids_filter``.
        """
        if not self._client.check_table_exist(self.table_name):
            return []
        top_k = kwargs.get("top_k", 4)
        table = self._client.open_table(self.table_name)
        search_query = table.search_text(
            column="text",
            expression=query,
            return_score=True,
            return_score_name="score",
            return_all_columns=True,
        ).limit(top_k)
        # Apply document_ids_filter if provided
        document_ids_filter = kwargs.get("document_ids_filter")
        if document_ids_filter:
            filter_sql = psql.SQL("meta->>'document_id' IN ({})").format(
                psql.SQL(", ").join(psql.Literal(id) for id in document_ids_filter)
            )
            search_query = search_query.where(filter_sql)
        results = search_query.fetchall()
        return self._process_full_text_results(results)

    def _process_full_text_results(self, results: list) -> list[Document]:
        """Process full-text search results into Document objects."""
        docs = []
        for row in results:
            # row format: (id, text, meta, embedding, score)
            text = row[1]
            meta = row[2]
            score = row[-1]  # score is the last column from return_score
            if isinstance(meta, str):
                meta = json.loads(meta)
            meta["score"] = score
            docs.append(Document(page_content=text, metadata=meta))
        return docs

    def delete(self):
        """Delete the entire collection table."""
        if self._client.check_table_exist(self.table_name):
            self._client.drop_table(self.table_name)

    def _create_collection(self, dimension: int):
        """Create the collection table with vector and full-text indexes.

        Serialized by a Redis lock so concurrent indexers do not race on DDL;
        a Redis key caches "already created" for an hour to skip the check.
        """
        lock_name = f"vector_indexing_lock_{self._collection_name}"
        with redis_client.lock(lock_name, timeout=20):
            collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
            if redis_client.get(collection_exist_cache_key):
                return
            if not self._client.check_table_exist(self.table_name):
                # Create table via SQL with CHECK constraint for vector dimension
                create_table_sql = psql.SQL("""
                    CREATE TABLE IF NOT EXISTS {} (
                        id TEXT PRIMARY KEY,
                        text TEXT NOT NULL,
                        meta JSONB NOT NULL,
                        embedding float4[] NOT NULL
                            CHECK (array_ndims(embedding) = 1
                            AND array_length(embedding, 1) = {})
                    );
                """).format(psql.Identifier(self.table_name), psql.Literal(dimension))
                self._client.execute(create_table_sql)
                # Wait for table to be fully ready before creating indexes
                max_wait_seconds = 30
                poll_interval = 2
                for _ in range(max_wait_seconds // poll_interval):
                    if self._client.check_table_exist(self.table_name):
                        break
                    time.sleep(poll_interval)
                else:
                    raise RuntimeError(f"Table {self.table_name} was not ready after {max_wait_seconds}s")
                # Open table and set vector index
                table = self._client.open_table(self.table_name)
                table.set_vector_index(
                    column="embedding",
                    distance_method=self._config.distance_method,
                    base_quantization_type=self._config.base_quantization_type,
                    max_degree=self._config.max_degree,
                    ef_construction=self._config.ef_construction,
                    # Reorder pass only makes sense with rabitq quantization.
                    use_reorder=self._config.base_quantization_type == "rabitq",
                )
                # Create full-text search index
                table.create_text_index(
                    index_name=f"ft_idx_{self._collection_name}",
                    column="text",
                    tokenizer=self._config.tokenizer,
                )
            redis_client.set(collection_exist_cache_key, 1, ex=3600)
class HologresVectorFactory(AbstractVectorFactory):
    """Builds HologresVector instances from a dataset and global config."""

    def init_vector(self, dataset: Dataset, attributes: list, embeddings: Embeddings) -> HologresVector:
        """Resolve the collection name for the dataset and construct the store."""
        if dataset.index_struct_dict:
            # Reuse the collection recorded in the existing index struct.
            collection_name = dataset.index_struct_dict["vector_store"]["class_prefix"]
        else:
            # First-time indexing: derive a collection name and persist it on the dataset.
            collection_name = Dataset.gen_collection_name_by_id(dataset.id)
            index_struct = self.gen_index_struct_dict(VectorType.HOLOGRES, collection_name)
            dataset.index_struct = json.dumps(index_struct)

        config = HologresVectorConfig(
            host=dify_config.HOLOGRES_HOST or "",
            port=dify_config.HOLOGRES_PORT,
            database=dify_config.HOLOGRES_DATABASE or "",
            access_key_id=dify_config.HOLOGRES_ACCESS_KEY_ID or "",
            access_key_secret=dify_config.HOLOGRES_ACCESS_KEY_SECRET or "",
            schema_name=dify_config.HOLOGRES_SCHEMA,
            tokenizer=dify_config.HOLOGRES_TOKENIZER,
            distance_method=dify_config.HOLOGRES_DISTANCE_METHOD,
            base_quantization_type=dify_config.HOLOGRES_BASE_QUANTIZATION_TYPE,
            max_degree=dify_config.HOLOGRES_MAX_DEGREE,
            ef_construction=dify_config.HOLOGRES_EF_CONSTRUCTION,
        )
        return HologresVector(collection_name=collection_name, config=config)

View File

@@ -135,8 +135,8 @@ class PGVectoRS(BaseVector):
def get_ids_by_metadata_field(self, key: str, value: str):
result = None
with Session(self._client) as session:
select_statement = sql_text(f"SELECT id FROM {self._collection_name} WHERE meta->>:key = :value")
result = session.execute(select_statement, {"key": key, "value": value}).fetchall()
select_statement = sql_text(f"SELECT id FROM {self._collection_name} WHERE meta->>'{key}' = '{value}'; ")
result = session.execute(select_statement).fetchall()
if result:
return [item[0] for item in result]
else:
@@ -172,9 +172,9 @@ class PGVectoRS(BaseVector):
def text_exists(self, id: str) -> bool:
with Session(self._client) as session:
select_statement = sql_text(
f"SELECT id FROM {self._collection_name} WHERE meta->>'doc_id' = :doc_id limit 1"
f"SELECT id FROM {self._collection_name} WHERE meta->>'doc_id' = '{id}' limit 1; "
)
result = session.execute(select_statement, {"doc_id": id}).fetchall()
result = session.execute(select_statement).fetchall()
return len(result) > 0
def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]:

View File

@@ -154,8 +154,10 @@ class RelytVector(BaseVector):
def get_ids_by_metadata_field(self, key: str, value: str):
result = None
with Session(self.client) as session:
select_statement = sql_text(f"""SELECT id FROM "{self._collection_name}" WHERE metadata->>:key = :value""")
result = session.execute(select_statement, {"key": key, "value": value}).fetchall()
select_statement = sql_text(
f"""SELECT id FROM "{self._collection_name}" WHERE metadata->>'{key}' = '{value}'; """
)
result = session.execute(select_statement).fetchall()
if result:
return [item[0] for item in result]
else:
@@ -199,10 +201,11 @@ class RelytVector(BaseVector):
def delete_by_ids(self, ids: list[str]):
with Session(self.client) as session:
ids_str = ",".join(f"'{doc_id}'" for doc_id in ids)
select_statement = sql_text(
f"""SELECT id FROM "{self._collection_name}" WHERE metadata->>'doc_id' = ANY(:doc_ids)"""
f"""SELECT id FROM "{self._collection_name}" WHERE metadata->>'doc_id' in ({ids_str}); """
)
result = session.execute(select_statement, {"doc_ids": ids}).fetchall()
result = session.execute(select_statement).fetchall()
if result:
ids = [item[0] for item in result]
self.delete_by_uuids(ids)
@@ -215,9 +218,9 @@ class RelytVector(BaseVector):
def text_exists(self, id: str) -> bool:
    """Return True when a record whose ``metadata->>'doc_id'`` equals *id* exists.

    The doc id is bound as a query parameter rather than interpolated into the
    SQL text, preventing SQL injection through *id*. Only the quoted collection
    name — presumably an internal, trusted identifier (TODO: confirm it is
    validated upstream) — is formatted into the statement.
    """
    with Session(self.client) as session:
        select_statement = sql_text(
            f"""SELECT id FROM "{self._collection_name}" WHERE metadata->>'doc_id' = :doc_id limit 1"""
        )
        # Single parameterized execution; the duplicated, string-interpolated
        # query that followed was redundant and injectable.
        result = session.execute(select_statement, {"doc_id": id}).fetchall()
        return len(result) > 0
def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]:

View File

@@ -38,7 +38,7 @@ class AbstractVectorFactory(ABC):
class Vector:
def __init__(self, dataset: Dataset, attributes: list | None = None):
if attributes is None:
attributes = ["doc_id", "dataset_id", "document_id", "doc_hash", "doc_type"]
attributes = ["doc_id", "dataset_id", "document_id", "doc_hash"]
self._dataset = dataset
self._embeddings = self._get_embeddings()
self._attributes = attributes
@@ -191,10 +191,6 @@ class Vector:
from core.rag.datasource.vdb.iris.iris_vector import IrisVectorFactory
return IrisVectorFactory
case VectorType.HOLOGRES:
from core.rag.datasource.vdb.hologres.hologres_vector import HologresVectorFactory
return HologresVectorFactory
case _:
raise ValueError(f"Vector store {vector_type} is not supported.")

View File

@@ -34,4 +34,3 @@ class VectorType(StrEnum):
MATRIXONE = "matrixone"
CLICKZETTA = "clickzetta"
IRIS = "iris"
HOLOGRES = "hologres"

View File

@@ -196,7 +196,6 @@ class WeaviateVector(BaseVector):
),
wc.Property(name="document_id", data_type=wc.DataType.TEXT),
wc.Property(name="doc_id", data_type=wc.DataType.TEXT),
wc.Property(name="doc_type", data_type=wc.DataType.TEXT),
wc.Property(name="chunk_index", data_type=wc.DataType.INT),
],
vector_config=wc.Configure.Vectors.self_provided(),
@@ -226,8 +225,6 @@ class WeaviateVector(BaseVector):
to_add.append(wc.Property(name="document_id", data_type=wc.DataType.TEXT))
if "doc_id" not in existing:
to_add.append(wc.Property(name="doc_id", data_type=wc.DataType.TEXT))
if "doc_type" not in existing:
to_add.append(wc.Property(name="doc_type", data_type=wc.DataType.TEXT))
if "chunk_index" not in existing:
to_add.append(wc.Property(name="chunk_index", data_type=wc.DataType.INT))

View File

@@ -9,8 +9,8 @@ from flask import current_app
from sqlalchemy import delete, func, select
from core.db.session_factory import session_factory
from core.workflow.nodes.knowledge_index.exc import KnowledgeIndexNodeError
from core.workflow.nodes.knowledge_index.protocols import Preview, PreviewItem, QaPreview
from dify_graph.nodes.knowledge_index.exc import KnowledgeIndexNodeError
from dify_graph.repositories.index_processor_protocol import Preview, PreviewItem, QaPreview
from models.dataset import Dataset, Document, DocumentSegment
from .index_processor_factory import IndexProcessorFactory

View File

@@ -294,7 +294,7 @@ class BaseIndexProcessor(ABC):
logging.warning("Error downloading image from %s: %s", image_url, str(e))
return None
except Exception:
logging.warning("Unexpected error downloading image from %s", image_url, exc_info=True)
logging.exception("Unexpected error downloading image from %s", image_url)
return None
def _download_tool_file(self, tool_file_id: str, current_user: Account) -> str | None:

View File

@@ -56,18 +56,18 @@ from core.rag.retrieval.template_prompts import (
)
from core.tools.signature import sign_upload_file
from core.tools.utils.dataset_retriever.dataset_retriever_base_tool import DatasetRetrieverBaseTool
from core.workflow.nodes.knowledge_retrieval import exc
from core.workflow.nodes.knowledge_retrieval.retrieval import (
KnowledgeRetrievalRequest,
Source,
SourceChildChunk,
SourceMetadata,
)
from dify_graph.file import File, FileTransferMethod, FileType
from dify_graph.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMUsage
from dify_graph.model_runtime.entities.message_entities import PromptMessage, PromptMessageRole, PromptMessageTool
from dify_graph.model_runtime.entities.model_entities import ModelFeature, ModelType
from dify_graph.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from dify_graph.nodes.knowledge_retrieval import exc
from dify_graph.repositories.rag_retrieval_protocol import (
KnowledgeRetrievalRequest,
Source,
SourceChildChunk,
SourceMetadata,
)
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from libs.json_in_md_parser import parse_and_check_json_markdown

View File

@@ -18,7 +18,7 @@ from tenacity import before_sleep_log, retry, retry_if_exception, stop_after_att
from configs import dify_config
from dify_graph.entities import WorkflowNodeExecution
from dify_graph.enums import WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus
from dify_graph.enums import NodeType, WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus
from dify_graph.model_runtime.utils.encoders import jsonable_encoder
from dify_graph.repositories.workflow_node_execution_repository import OrderConfig, WorkflowNodeExecutionRepository
from dify_graph.workflow_type_encoder import WorkflowRuntimeTypeConverter
@@ -146,7 +146,7 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository)
index=db_model.index,
predecessor_node_id=db_model.predecessor_node_id,
node_id=db_model.node_id,
node_type=db_model.node_type,
node_type=NodeType(db_model.node_type),
title=db_model.title,
inputs=inputs,
process_data=process_data,

View File

@@ -116,7 +116,6 @@ class ToolParameterConfigurationManager:
return a deep copy of parameters with decrypted values
"""
parameters = self._deep_copy(parameters)
cache = ToolParameterCache(
tenant_id=self.tenant_id,

View File

@@ -3,7 +3,7 @@ from typing import Any
from core.tools.entities.tool_entities import WorkflowToolParameterConfiguration
from core.tools.errors import WorkflowToolHumanInputNotSupportedError
from dify_graph.enums import BuiltinNodeTypes
from dify_graph.enums import NodeType
from dify_graph.nodes.base.entities import OutputVariableEntity
from dify_graph.variables.input_entities import VariableEntity
@@ -51,7 +51,7 @@ class WorkflowToolConfigurationUtils:
def ensure_no_human_input_nodes(cls, graph: Mapping[str, Any]) -> None:
nodes = graph.get("nodes", [])
for node in nodes:
if node.get("data", {}).get("type") == BuiltinNodeTypes.HUMAN_INPUT:
if node.get("data", {}).get("type") == NodeType.HUMAN_INPUT:
raise WorkflowToolHumanInputNotSupportedError()
@classmethod

View File

@@ -1,18 +0,0 @@
from typing import Final

# Node-type string identifiers for the trigger family of workflow nodes.
TRIGGER_WEBHOOK_NODE_TYPE: Final[str] = "trigger-webhook"
TRIGGER_SCHEDULE_NODE_TYPE: Final[str] = "trigger-schedule"
TRIGGER_PLUGIN_NODE_TYPE: Final[str] = "trigger-plugin"

# Metadata key under which trigger details are carried; the value must stay stable.
TRIGGER_INFO_METADATA_KEY: Final[str] = "trigger_info"

# Immutable membership set covering every trigger node type.
TRIGGER_NODE_TYPES: Final[frozenset[str]] = frozenset(
    (TRIGGER_WEBHOOK_NODE_TYPE, TRIGGER_SCHEDULE_NODE_TYPE, TRIGGER_PLUGIN_NODE_TYPE)
)


def is_trigger_node_type(node_type: str) -> bool:
    """Return True when *node_type* names one of the trigger node types."""
    return node_type in TRIGGER_NODE_TYPES

View File

@@ -11,11 +11,6 @@ from typing import Any
from pydantic import BaseModel
from core.plugin.entities.request import TriggerInvokeEventResponse
from core.trigger.constants import (
TRIGGER_PLUGIN_NODE_TYPE,
TRIGGER_SCHEDULE_NODE_TYPE,
TRIGGER_WEBHOOK_NODE_TYPE,
)
from core.trigger.debug.event_bus import TriggerDebugEventBus
from core.trigger.debug.events import (
PluginTriggerDebugEvent,
@@ -24,9 +19,10 @@ from core.trigger.debug.events import (
build_plugin_pool_key,
build_webhook_pool_key,
)
from core.workflow.nodes.trigger_plugin.entities import TriggerEventNodeData
from core.workflow.nodes.trigger_schedule.entities import ScheduleConfig
from dify_graph.entities.graph_config import NodeConfigDict
from dify_graph.enums import NodeType
from dify_graph.nodes.trigger_plugin.entities import TriggerEventNodeData
from dify_graph.nodes.trigger_schedule.entities import ScheduleConfig
from extensions.ext_redis import redis_client
from libs.datetime_utils import ensure_naive_utc, naive_utc_now
from libs.schedule_utils import calculate_next_run_at
@@ -210,19 +206,21 @@ def create_event_poller(
if not node_config:
raise ValueError("Node data not found for node %s", node_id)
node_type = draft_workflow.get_node_type_from_node_config(node_config)
if node_type == TRIGGER_PLUGIN_NODE_TYPE:
return PluginTriggerDebugEventPoller(
tenant_id=tenant_id, user_id=user_id, app_id=app_id, node_config=node_config, node_id=node_id
)
if node_type == TRIGGER_WEBHOOK_NODE_TYPE:
return WebhookTriggerDebugEventPoller(
tenant_id=tenant_id, user_id=user_id, app_id=app_id, node_config=node_config, node_id=node_id
)
if node_type == TRIGGER_SCHEDULE_NODE_TYPE:
return ScheduleTriggerDebugEventPoller(
tenant_id=tenant_id, user_id=user_id, app_id=app_id, node_config=node_config, node_id=node_id
)
raise ValueError("unable to create event poller for node type %s", node_type)
match node_type:
case NodeType.TRIGGER_PLUGIN:
return PluginTriggerDebugEventPoller(
tenant_id=tenant_id, user_id=user_id, app_id=app_id, node_config=node_config, node_id=node_id
)
case NodeType.TRIGGER_WEBHOOK:
return WebhookTriggerDebugEventPoller(
tenant_id=tenant_id, user_id=user_id, app_id=app_id, node_config=node_config, node_id=node_id
)
case NodeType.TRIGGER_SCHEDULE:
return ScheduleTriggerDebugEventPoller(
tenant_id=tenant_id, user_id=user_id, app_id=app_id, node_config=node_config, node_id=node_id
)
case _:
raise ValueError("unable to create event poller for node type %s", node_type)
def select_trigger_debug_events(

View File

@@ -1 +1,4 @@
"""Core workflow package."""
from .node_factory import DifyNodeFactory
from .workflow_entry import WorkflowEntry
__all__ = ["DifyNodeFactory", "WorkflowEntry"]

View File

@@ -1,7 +1,4 @@
import importlib
import pkgutil
from collections.abc import Callable, Iterator, Mapping, MutableMapping
from functools import lru_cache
from collections.abc import Callable, Mapping
from typing import TYPE_CHECKING, Any, TypeAlias, cast, final
from sqlalchemy import select
@@ -11,6 +8,7 @@ from typing_extensions import override
from configs import dify_config
from core.app.entities.app_invoke_entities import DifyRunContext
from core.app.llm.model_access import build_dify_model_access
from core.datasource.datasource_manager import DatasourceManager
from core.helper.code_executor.code_executor import (
CodeExecutionError,
CodeExecutor,
@@ -19,9 +17,12 @@ from core.helper.ssrf_proxy import ssrf_proxy
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.prompt.entities.advanced_prompt_entities import MemoryConfig
from core.rag.index_processor.index_processor import IndexProcessor
from core.rag.retrieval.dataset_retrieval import DatasetRetrieval
from core.rag.summary_index.summary_index import SummaryIndex
from core.repositories.human_input_repository import HumanInputFormRepositoryImpl
from core.tools.tool_file_manager import ToolFileManager
from core.trigger.constants import TRIGGER_NODE_TYPES
from core.workflow.node_resolution import resolve_workflow_node_class
from core.workflow.nodes.agent.message_transformer import AgentMessageTransformer
from core.workflow.nodes.agent.plugin_strategy_adapter import (
PluginAgentStrategyPresentationProvider,
@@ -31,7 +32,7 @@ from core.workflow.nodes.agent.runtime_support import AgentRuntimeSupport
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.entities.graph_config import NodeConfigDict, NodeConfigDictAdapter
from dify_graph.entities.graph_init_params import DIFY_RUN_CONTEXT_KEY
from dify_graph.enums import BuiltinNodeTypes, NodeType, SystemVariableKey
from dify_graph.enums import NodeType, SystemVariableKey
from dify_graph.file.file_manager import file_manager
from dify_graph.graph.graph import NodeFactory
from dify_graph.model_runtime.entities.model_entities import ModelType
@@ -45,7 +46,6 @@ from dify_graph.nodes.document_extractor import UnstructuredApiConfig
from dify_graph.nodes.http_request import build_http_request_config
from dify_graph.nodes.llm.entities import LLMNodeData
from dify_graph.nodes.llm.exc import LLMModeRequiredError, ModelNotExistError
from dify_graph.nodes.llm.protocols import TemplateRenderer
from dify_graph.nodes.parameter_extractor.entities import ParameterExtractorNodeData
from dify_graph.nodes.question_classifier.entities import QuestionClassifierNodeData
from dify_graph.nodes.template_transform.template_renderer import (
@@ -59,135 +59,6 @@ if TYPE_CHECKING:
from dify_graph.entities import GraphInitParams
from dify_graph.runtime import GraphRuntimeState
LATEST_VERSION = "latest"
_START_NODE_TYPES: frozenset[NodeType] = frozenset(
(BuiltinNodeTypes.START, BuiltinNodeTypes.DATASOURCE, *TRIGGER_NODE_TYPES)
)
def _import_node_package(package_name: str, *, excluded_modules: frozenset[str] = frozenset()) -> None:
package = importlib.import_module(package_name)
for _, module_name, _ in pkgutil.walk_packages(package.__path__, package.__name__ + "."):
if module_name in excluded_modules:
continue
importlib.import_module(module_name)
@lru_cache(maxsize=1)
def register_nodes() -> None:
    """Import production node modules so they self-register with ``Node``.

    The lru_cache guard means the package walk runs at most once per process.
    """
    for package_name in ("dify_graph.nodes", "core.workflow.nodes"):
        _import_node_package(package_name)
def get_node_type_classes_mapping() -> Mapping[NodeType, Mapping[str, type[Node]]]:
    """Return a read-only snapshot of the current production node registry.
    The workflow layer owns node bootstrap because it must compose built-in
    `dify_graph.nodes.*` implementations with workflow-local nodes under
    `core.workflow.nodes.*`. Keeping this import side effect here avoids
    reintroducing registry bootstrapping into lower-level graph primitives.
    """
    # Idempotent (lru_cache-backed), so invoking it on every lookup is cheap.
    register_nodes()
    return Node.get_node_type_classes_mapping()
def resolve_workflow_node_class(*, node_type: NodeType, node_version: str) -> type[Node]:
    """Resolve the registered node class for *node_type*.

    Prefers an exact *node_version* match and falls back to the registry's
    LATEST_VERSION entry.

    Raises:
        ValueError: when the node type is unknown, or no class can be resolved.
    """
    versions = get_node_type_classes_mapping().get(node_type)
    if not versions:
        raise ValueError(f"No class mapping found for node type: {node_type}")
    resolved = versions.get(node_version) or versions.get(LATEST_VERSION)
    if not resolved:
        raise ValueError(f"No latest version class found for node type: {node_type}")
    return resolved
def is_start_node_type(node_type: NodeType) -> bool:
    """Return True when the node type can serve as a workflow entry point."""
    # Entry points: start, datasource, and every trigger node type
    # (see the `_START_NODE_TYPES` frozenset definition).
    return node_type in _START_NODE_TYPES
def get_default_root_node_id(graph_config: Mapping[str, Any]) -> str:
    """Resolve the default entry node for a persisted top-level workflow graph.

    This workflow-layer helper depends on start-node semantics defined by
    `is_start_node_type`, so it intentionally lives next to the node registry
    instead of in the raw `dify_graph.entities.graph_config` schema module.

    Raises:
        ValueError: when ``nodes`` is not a list or no entry node is found.
    """
    raw_nodes = graph_config.get("nodes")
    if not isinstance(raw_nodes, list):
        raise ValueError("nodes in workflow graph must be a list")
    for candidate in raw_nodes:
        # Skip malformed entries and canvas annotations.
        if not isinstance(candidate, Mapping) or candidate.get("type") == "custom-note":
            continue
        candidate_id = candidate.get("id")
        payload = candidate.get("data")
        if isinstance(candidate_id, str) and isinstance(payload, Mapping):
            kind = payload.get("type")
            if isinstance(kind, str) and is_start_node_type(kind):
                return candidate_id
    raise ValueError("Unable to determine default root node ID from workflow graph")
class _LazyNodeTypeClassesMapping(MutableMapping[NodeType, Mapping[str, type[Node]]]):
    """Mutable dict-like view over the current node registry.

    Reads are served from a lazily refreshed snapshot of the global ``Node``
    registry; writes (``__setitem__``/``__delitem__``) are kept as local
    overrides/deletions layered on top of that snapshot, so they never mutate
    the underlying registry itself.
    """
    def __init__(self) -> None:
        # Cached copy of the registry, valid for `_cached_version`.
        self._cached_snapshot: dict[NodeType, Mapping[str, type[Node]]] = {}
        # Registry version the cache was built from; -1 forces the first refresh.
        self._cached_version = -1
        # Keys deleted through this view; masked out of the merged snapshot.
        self._deleted: set[NodeType] = set()
        # Keys assigned through this view; take precedence over the snapshot.
        self._overrides: dict[NodeType, Mapping[str, type[Node]]] = {}
    def _snapshot(self) -> dict[NodeType, Mapping[str, type[Node]]]:
        # Rebuild the cached snapshot only when the registry version changed.
        current_version = Node.get_registry_version()
        if self._cached_version != current_version:
            self._cached_snapshot = dict(get_node_type_classes_mapping())
            self._cached_version = current_version
        # Fast path: no local edits, the cache itself can be returned.
        if not self._deleted and not self._overrides:
            return self._cached_snapshot
        # Merge: drop deleted keys first, then apply overrides on top.
        snapshot = {key: value for key, value in self._cached_snapshot.items() if key not in self._deleted}
        snapshot.update(self._overrides)
        return snapshot
    def __getitem__(self, key: NodeType) -> Mapping[str, type[Node]]:
        return self._snapshot()[key]
    def __setitem__(self, key: NodeType, value: Mapping[str, type[Node]]) -> None:
        # Assigning a key also resurrects it if it was previously deleted.
        self._deleted.discard(key)
        self._overrides[key] = value
    def __delitem__(self, key: NodeType) -> None:
        # Prefer removing a local override; otherwise mask a snapshot key.
        if key in self._overrides:
            del self._overrides[key]
            return
        if key in self._cached_snapshot:
            self._deleted.add(key)
            return
        raise KeyError(key)
    def __iter__(self) -> Iterator[NodeType]:
        return iter(self._snapshot())
    def __len__(self) -> int:
        return len(self._snapshot())
# Keep the canonical node-class mapping in the workflow layer that also bootstraps
# legacy `core.workflow.nodes.*` registrations.
NODE_TYPE_CLASSES_MAPPING: MutableMapping[NodeType, Mapping[str, type[Node]]] = _LazyNodeTypeClassesMapping()
LLMCompatibleNodeData: TypeAlias = LLMNodeData | QuestionClassifierNodeData | ParameterExtractorNodeData
@@ -229,16 +100,6 @@ class DefaultWorkflowCodeExecutor:
return isinstance(error, CodeExecutionError)
class DefaultLLMTemplateRenderer(TemplateRenderer):
    """Render Jinja2 templates by delegating to the workflow code executor."""

    def render_jinja2(self, *, template: str, inputs: Mapping[str, Any]) -> str:
        execution = CodeExecutor.execute_workflow_code_template(
            language=CodeLanguage.JINJA2,
            code=template,
            inputs=inputs,
        )
        # The executor returns a mapping; a missing "result" key renders as "".
        return str(execution.get("result", ""))
@final
class DifyNodeFactory(NodeFactory):
"""
@@ -265,11 +126,11 @@ class DifyNodeFactory(NodeFactory):
max_object_array_length=dify_config.CODE_MAX_OBJECT_ARRAY_LENGTH,
)
self._template_renderer = CodeExecutorJinja2TemplateRenderer(code_executor=self._code_executor)
self._llm_template_renderer: TemplateRenderer = DefaultLLMTemplateRenderer()
self._template_transform_max_output_length = dify_config.TEMPLATE_TRANSFORM_MAX_LENGTH
self._http_request_http_client = ssrf_proxy
self._http_request_tool_file_manager_factory = ToolFileManager
self._http_request_file_manager = file_manager
self._rag_retrieval = DatasetRetrieval()
self._document_extractor_unstructured_api_config = UnstructuredApiConfig(
api_url=dify_config.UNSTRUCTURED_API_URL,
api_key=dify_config.UNSTRUCTURED_API_KEY or "",
@@ -316,46 +177,56 @@ class DifyNodeFactory(NodeFactory):
node_class = self._resolve_node_class(node_type=node_data.type, node_version=str(node_data.version))
node_type = node_data.type
node_init_kwargs_factories: Mapping[NodeType, Callable[[], dict[str, object]]] = {
BuiltinNodeTypes.CODE: lambda: {
NodeType.CODE: lambda: {
"code_executor": self._code_executor,
"code_limits": self._code_limits,
},
BuiltinNodeTypes.TEMPLATE_TRANSFORM: lambda: {
NodeType.TEMPLATE_TRANSFORM: lambda: {
"template_renderer": self._template_renderer,
"max_output_length": self._template_transform_max_output_length,
},
BuiltinNodeTypes.HTTP_REQUEST: lambda: {
NodeType.HTTP_REQUEST: lambda: {
"http_request_config": self._http_request_config,
"http_client": self._http_request_http_client,
"tool_file_manager_factory": self._http_request_tool_file_manager_factory,
"file_manager": self._http_request_file_manager,
},
BuiltinNodeTypes.HUMAN_INPUT: lambda: {
NodeType.HUMAN_INPUT: lambda: {
"form_repository": HumanInputFormRepositoryImpl(tenant_id=self._dify_context.tenant_id),
},
BuiltinNodeTypes.LLM: lambda: self._build_llm_compatible_node_init_kwargs(
NodeType.KNOWLEDGE_INDEX: lambda: {
"index_processor": IndexProcessor(),
"summary_index_service": SummaryIndex(),
},
NodeType.LLM: lambda: self._build_llm_compatible_node_init_kwargs(
node_class=node_class,
node_data=node_data,
include_http_client=True,
),
BuiltinNodeTypes.DOCUMENT_EXTRACTOR: lambda: {
NodeType.DATASOURCE: lambda: {
"datasource_manager": DatasourceManager,
},
NodeType.KNOWLEDGE_RETRIEVAL: lambda: {
"rag_retrieval": self._rag_retrieval,
},
NodeType.DOCUMENT_EXTRACTOR: lambda: {
"unstructured_api_config": self._document_extractor_unstructured_api_config,
"http_client": self._http_request_http_client,
},
BuiltinNodeTypes.QUESTION_CLASSIFIER: lambda: self._build_llm_compatible_node_init_kwargs(
NodeType.QUESTION_CLASSIFIER: lambda: self._build_llm_compatible_node_init_kwargs(
node_class=node_class,
node_data=node_data,
include_http_client=True,
),
BuiltinNodeTypes.PARAMETER_EXTRACTOR: lambda: self._build_llm_compatible_node_init_kwargs(
NodeType.PARAMETER_EXTRACTOR: lambda: self._build_llm_compatible_node_init_kwargs(
node_class=node_class,
node_data=node_data,
include_http_client=False,
),
BuiltinNodeTypes.TOOL: lambda: {
NodeType.TOOL: lambda: {
"tool_file_manager_factory": self._http_request_tool_file_manager_factory(),
},
BuiltinNodeTypes.AGENT: lambda: {
NodeType.AGENT: lambda: {
"strategy_resolver": self._agent_strategy_resolver,
"presentation_provider": self._agent_strategy_presentation_provider,
"runtime_support": self._agent_runtime_support,
@@ -403,8 +274,6 @@ class DifyNodeFactory(NodeFactory):
model_instance=model_instance,
),
}
if validated_node_data.type in {BuiltinNodeTypes.LLM, BuiltinNodeTypes.QUESTION_CLASSIFIER}:
node_init_kwargs["template_renderer"] = self._llm_template_renderer
if include_http_client:
node_init_kwargs["http_client"] = self._http_request_http_client
return node_init_kwargs

View File

@@ -0,0 +1,42 @@
from __future__ import annotations
from collections.abc import Mapping
from importlib import import_module
from dify_graph.enums import NodeType
from dify_graph.nodes.base.node import Node
from dify_graph.nodes.node_mapping import LATEST_VERSION, get_node_type_classes_mapping
_WORKFLOW_NODE_MODULES = ("core.workflow.nodes.agent",)
_workflow_nodes_registered = False
def ensure_workflow_nodes_registered() -> None:
    """Import workflow-local node modules so they can register with `Node.__init_subclass__`."""
    global _workflow_nodes_registered
    # One-shot: after the first successful pass this becomes a no-op.
    if not _workflow_nodes_registered:
        for module_name in _WORKFLOW_NODE_MODULES:
            import_module(module_name)
        _workflow_nodes_registered = True
def get_workflow_node_type_classes_mapping() -> Mapping[NodeType, Mapping[str, type[Node]]]:
    """Return the node registry after workflow-local nodes have been registered."""
    # Ensure the one-time import side effects ran before reading the registry.
    ensure_workflow_nodes_registered()
    return get_node_type_classes_mapping()
def resolve_workflow_node_class(*, node_type: NodeType, node_version: str) -> type[Node]:
    """Resolve the registered node class for *node_type*.

    Prefers an exact *node_version* match and falls back to the registry's
    LATEST_VERSION entry.

    Raises:
        ValueError: when the node type is unknown, or no class can be resolved.
    """
    versions = get_workflow_node_type_classes_mapping().get(node_type)
    if not versions:
        raise ValueError(f"No class mapping found for node type: {node_type}")
    resolved = versions.get(node_version) or versions.get(LATEST_VERSION)
    if not resolved:
        raise ValueError(f"No latest version class found for node type: {node_type}")
    return resolved

View File

@@ -1 +0,0 @@
"""Workflow node implementations that remain under the legacy core.workflow namespace."""

View File

@@ -4,7 +4,7 @@ from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any
from dify_graph.entities.graph_config import NodeConfigDict
from dify_graph.enums import BuiltinNodeTypes, SystemVariableKey, WorkflowNodeExecutionStatus
from dify_graph.enums import NodeType, SystemVariableKey, WorkflowNodeExecutionStatus
from dify_graph.node_events import NodeEventBase, NodeRunResult, StreamCompletedEvent
from dify_graph.nodes.base.node import Node
from dify_graph.nodes.base.variable_template_parser import VariableTemplateParser
@@ -24,7 +24,7 @@ if TYPE_CHECKING:
class AgentNode(Node[AgentNodeData]):
node_type = BuiltinNodeTypes.AGENT
node_type = NodeType.AGENT
_strategy_resolver: AgentStrategyResolver
_presentation_provider: AgentStrategyPresentationProvider

View File

@@ -6,11 +6,11 @@ from pydantic import BaseModel
from core.prompt.entities.advanced_prompt_entities import MemoryConfig
from core.tools.entities.tool_entities import ToolSelector
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
from dify_graph.enums import NodeType
class AgentNodeData(BaseNodeData):
type: NodeType = BuiltinNodeTypes.AGENT
type: NodeType = NodeType.AGENT
agent_strategy_provider_name: str
agent_strategy_name: str
agent_strategy_label: str

View File

@@ -8,7 +8,7 @@ from sqlalchemy.orm import Session
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.utils.message_transformer import ToolFileMessageTransformer
from dify_graph.enums import BuiltinNodeTypes, NodeType, WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus
from dify_graph.enums import NodeType, WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus
from dify_graph.file import File, FileTransferMethod
from dify_graph.model_runtime.entities.llm_entities import LLMUsage, LLMUsageMetadata
from dify_graph.model_runtime.utils.encoders import jsonable_encoder
@@ -123,7 +123,7 @@ class AgentMessageTransformer:
)
elif message.type == ToolInvokeMessage.MessageType.JSON:
assert isinstance(message.message, ToolInvokeMessage.JsonMessage)
if node_type == BuiltinNodeTypes.AGENT:
if node_type == NodeType.AGENT:
if isinstance(message.message.json_object, dict):
msg_metadata: dict[str, Any] = message.message.json_object.pop("execution_metadata", {})
llm_usage = LLMUsage.from_metadata(cast(LLMUsageMetadata, msg_metadata))

View File

@@ -1 +0,0 @@
"""Datasource workflow node package."""

View File

@@ -1,5 +0,0 @@
"""Knowledge index workflow node package."""
KNOWLEDGE_INDEX_NODE_TYPE = "knowledge-index"
__all__ = ["KNOWLEDGE_INDEX_NODE_TYPE"]

View File

@@ -1 +0,0 @@
"""Knowledge retrieval workflow node package."""

View File

@@ -1,3 +0,0 @@
from .trigger_schedule_node import TriggerScheduleNode
__all__ = ["TriggerScheduleNode"]

View File

@@ -8,7 +8,8 @@ from core.app.apps.exc import GenerateTaskStoppedError
from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom, build_dify_run_context
from core.app.workflow.layers.llm_quota import LLMQuotaLayer
from core.app.workflow.layers.observability import ObservabilityLayer
from core.workflow.node_factory import DifyNodeFactory, resolve_workflow_node_class
from core.workflow.node_factory import DifyNodeFactory
from core.workflow.node_resolution import resolve_workflow_node_class
from dify_graph.constants import ENVIRONMENT_VARIABLE_NODE_ID
from dify_graph.entities import GraphInitParams
from dify_graph.entities.graph_config import NodeConfigDictAdapter
@@ -21,7 +22,7 @@ from dify_graph.graph_engine.layers import DebugLoggingLayer, ExecutionLimitsLay
from dify_graph.graph_engine.layers.base import GraphEngineLayer
from dify_graph.graph_engine.protocols.command_channel import CommandChannel
from dify_graph.graph_events import GraphEngineEvent, GraphNodeEventBase, GraphRunFailedEvent
from dify_graph.nodes import BuiltinNodeTypes
from dify_graph.nodes import NodeType
from dify_graph.nodes.base.node import Node
from dify_graph.runtime import ChildGraphNotFoundError, GraphRuntimeState, VariablePool
from dify_graph.system_variable import SystemVariable
@@ -252,7 +253,7 @@ class WorkflowEntry:
variable_mapping=variable_mapping,
user_inputs=user_inputs,
)
if node_type != BuiltinNodeTypes.DATASOURCE:
if node_type != NodeType.DATASOURCE:
cls.mapping_user_inputs_to_variable_pool(
variable_mapping=variable_mapping,
user_inputs=user_inputs,
@@ -302,7 +303,7 @@ class WorkflowEntry:
"height": node_height,
"type": "custom",
"data": {
"type": BuiltinNodeTypes.START,
"type": NodeType.START,
"title": "Start",
"desc": "Start",
},
@@ -338,8 +339,8 @@ class WorkflowEntry:
# Create a minimal graph for single node execution
graph_dict = cls._create_single_node_graph(node_id, node_data)
node_type = node_data.get("type", "")
if node_type not in {BuiltinNodeTypes.PARAMETER_EXTRACTOR, BuiltinNodeTypes.QUESTION_CLASSIFIER}:
node_type = NodeType(node_data.get("type", ""))
if node_type not in {NodeType.PARAMETER_EXTRACTOR, NodeType.QUESTION_CLASSIFIER}:
raise ValueError(f"Node type {node_type} not supported")
node_cls = resolve_workflow_node_class(node_type=node_type, node_version="1")

View File

@@ -113,7 +113,7 @@ The codebase enforces strict layering via import-linter:
1. Create node class in `nodes/<node_type>/`
1. Inherit from `BaseNode` or appropriate base class
1. Implement `_run()` method
1. Ensure the node module is importable under `nodes/<node_type>/`
1. Register in `nodes/node_mapping.py`
1. Add tests in `tests/unit_tests/dify_graph/nodes/`
### Implementing a Custom Layer

View File

@@ -121,8 +121,6 @@ class DefaultValue(BaseModel):
class BaseNodeData(ABC, BaseModel):
# Raw graph payloads are first validated through `NodeConfigDictAdapter`, where
# `node["data"]` is typed as `BaseNodeData` before the concrete node class is known.
# `type` therefore accepts downstream string node kinds; unknown node implementations
# are rejected later when the node factory resolves the node registry.
# At that boundary, node-specific fields are still "extra" relative to this shared DTO,
# and persisted templates/workflows also carry undeclared compatibility keys such as
# `selected`, `params`, `paramSchemas`, and `datasource_label`. Keep extras permissive

View File

@@ -48,7 +48,7 @@ class WorkflowNodeExecution(BaseModel):
index: int # Sequence number for ordering in trace visualization
predecessor_node_id: str | None = None # ID of the node that executed before this one
node_id: str # ID of the node being executed
node_type: NodeType # Type of node (e.g., start, llm, downstream response node)
node_type: NodeType # Type of node (e.g., start, llm, knowledge)
title: str # Display title of the node
# Execution data

View File

@@ -1,5 +1,4 @@
from enum import StrEnum
from typing import ClassVar, TypeAlias
class NodeState(StrEnum):
@@ -34,71 +33,56 @@ class SystemVariableKey(StrEnum):
INVOKE_FROM = "invoke_from"
NodeType: TypeAlias = str
class NodeType(StrEnum):
START = "start"
END = "end"
ANSWER = "answer"
LLM = "llm"
KNOWLEDGE_RETRIEVAL = "knowledge-retrieval"
KNOWLEDGE_INDEX = "knowledge-index"
IF_ELSE = "if-else"
CODE = "code"
TEMPLATE_TRANSFORM = "template-transform"
QUESTION_CLASSIFIER = "question-classifier"
HTTP_REQUEST = "http-request"
TOOL = "tool"
DATASOURCE = "datasource"
VARIABLE_AGGREGATOR = "variable-aggregator"
LEGACY_VARIABLE_AGGREGATOR = "variable-assigner" # TODO: Merge this into VARIABLE_AGGREGATOR in the database.
LOOP = "loop"
LOOP_START = "loop-start"
LOOP_END = "loop-end"
ITERATION = "iteration"
ITERATION_START = "iteration-start" # Fake start node for iteration.
PARAMETER_EXTRACTOR = "parameter-extractor"
VARIABLE_ASSIGNER = "assigner"
DOCUMENT_EXTRACTOR = "document-extractor"
LIST_OPERATOR = "list-operator"
AGENT = "agent"
TRIGGER_WEBHOOK = "trigger-webhook"
TRIGGER_SCHEDULE = "trigger-schedule"
TRIGGER_PLUGIN = "trigger-plugin"
HUMAN_INPUT = "human-input"
@property
def is_trigger_node(self) -> bool:
"""Check if this node type is a trigger node."""
return self in [
NodeType.TRIGGER_WEBHOOK,
NodeType.TRIGGER_SCHEDULE,
NodeType.TRIGGER_PLUGIN,
]
class BuiltinNodeTypes:
    """Built-in node type string constants.
    `node_type` values are plain strings throughout the graph runtime. This namespace
    only exposes the built-in values shipped by `dify_graph`; downstream packages can
    use additional strings without extending this class.
    """
    START: ClassVar[NodeType] = "start"
    END: ClassVar[NodeType] = "end"
    ANSWER: ClassVar[NodeType] = "answer"
    LLM: ClassVar[NodeType] = "llm"
    KNOWLEDGE_RETRIEVAL: ClassVar[NodeType] = "knowledge-retrieval"
    IF_ELSE: ClassVar[NodeType] = "if-else"
    CODE: ClassVar[NodeType] = "code"
    TEMPLATE_TRANSFORM: ClassVar[NodeType] = "template-transform"
    QUESTION_CLASSIFIER: ClassVar[NodeType] = "question-classifier"
    HTTP_REQUEST: ClassVar[NodeType] = "http-request"
    TOOL: ClassVar[NodeType] = "tool"
    DATASOURCE: ClassVar[NodeType] = "datasource"
    VARIABLE_AGGREGATOR: ClassVar[NodeType] = "variable-aggregator"
    # Legacy persisted value kept for stored workflows; note it differs from
    # VARIABLE_ASSIGNER ("assigner") below.
    LEGACY_VARIABLE_AGGREGATOR: ClassVar[NodeType] = "variable-assigner"
    LOOP: ClassVar[NodeType] = "loop"
    LOOP_START: ClassVar[NodeType] = "loop-start"
    LOOP_END: ClassVar[NodeType] = "loop-end"
    ITERATION: ClassVar[NodeType] = "iteration"
    ITERATION_START: ClassVar[NodeType] = "iteration-start"
    PARAMETER_EXTRACTOR: ClassVar[NodeType] = "parameter-extractor"
    VARIABLE_ASSIGNER: ClassVar[NodeType] = "assigner"
    DOCUMENT_EXTRACTOR: ClassVar[NodeType] = "document-extractor"
    LIST_OPERATOR: ClassVar[NodeType] = "list-operator"
    AGENT: ClassVar[NodeType] = "agent"
    HUMAN_INPUT: ClassVar[NodeType] = "human-input"
BUILT_IN_NODE_TYPES: tuple[NodeType, ...] = (
BuiltinNodeTypes.START,
BuiltinNodeTypes.END,
BuiltinNodeTypes.ANSWER,
BuiltinNodeTypes.LLM,
BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL,
BuiltinNodeTypes.IF_ELSE,
BuiltinNodeTypes.CODE,
BuiltinNodeTypes.TEMPLATE_TRANSFORM,
BuiltinNodeTypes.QUESTION_CLASSIFIER,
BuiltinNodeTypes.HTTP_REQUEST,
BuiltinNodeTypes.TOOL,
BuiltinNodeTypes.DATASOURCE,
BuiltinNodeTypes.VARIABLE_AGGREGATOR,
BuiltinNodeTypes.LEGACY_VARIABLE_AGGREGATOR,
BuiltinNodeTypes.LOOP,
BuiltinNodeTypes.LOOP_START,
BuiltinNodeTypes.LOOP_END,
BuiltinNodeTypes.ITERATION,
BuiltinNodeTypes.ITERATION_START,
BuiltinNodeTypes.PARAMETER_EXTRACTOR,
BuiltinNodeTypes.VARIABLE_ASSIGNER,
BuiltinNodeTypes.DOCUMENT_EXTRACTOR,
BuiltinNodeTypes.LIST_OPERATOR,
BuiltinNodeTypes.AGENT,
BuiltinNodeTypes.HUMAN_INPUT,
)
@property
def is_start_node(self) -> bool:
"""Check if this node type can serve as a workflow entry point."""
return self in [
NodeType.START,
NodeType.DATASOURCE,
NodeType.TRIGGER_WEBHOOK,
NodeType.TRIGGER_SCHEDULE,
NodeType.TRIGGER_PLUGIN,
]
class NodeExecutionType(StrEnum):
@@ -252,6 +236,7 @@ class WorkflowNodeExecutionMetadataKey(StrEnum):
CURRENCY = "currency"
TOOL_INFO = "tool_info"
AGENT_LOG = "agent_log"
TRIGGER_INFO = "trigger_info"
ITERATION_ID = "iteration_id"
ITERATION_INDEX = "iteration_index"
LOOP_ID = "loop_id"

View File

@@ -83,6 +83,50 @@ class Graph:
return node_configs_map
@classmethod
def _find_root_node_id(
cls,
node_configs_map: Mapping[str, NodeConfigDict],
edge_configs: Sequence[Mapping[str, object]],
root_node_id: str | None = None,
) -> str:
"""
Find the root node ID if not specified.
:param node_configs_map: mapping of node ID to node config
:param edge_configs: list of edge configurations
:param root_node_id: explicitly specified root node ID
:return: determined root node ID
"""
if root_node_id:
if root_node_id not in node_configs_map:
raise ValueError(f"Root node id {root_node_id} not found in the graph")
return root_node_id
# Find nodes with no incoming edges
nodes_with_incoming: set[str] = set()
for edge_config in edge_configs:
target = edge_config.get("target")
if isinstance(target, str):
nodes_with_incoming.add(target)
root_candidates = [nid for nid in node_configs_map if nid not in nodes_with_incoming]
# Prefer START node if available
start_node_id = None
for nid in root_candidates:
node_data = node_configs_map[nid]["data"]
if node_data.type.is_start_node:
start_node_id = nid
break
root_node_id = start_node_id or (root_candidates[0] if root_candidates else None)
if not root_node_id:
raise ValueError("Unable to determine root node ID")
return root_node_id
@classmethod
def _build_edges(
cls, edge_configs: list[dict[str, object]]
@@ -257,15 +301,15 @@ class Graph:
*,
graph_config: Mapping[str, object],
node_factory: NodeFactory,
root_node_id: str,
root_node_id: str | None = None,
skip_validation: bool = False,
) -> Graph:
"""
Initialize a graph with an explicit execution entry point.
Initialize graph
:param graph_config: graph config containing nodes and edges
:param node_factory: factory for creating node instances from config data
:param root_node_id: active root node id
:param root_node_id: root node id
:return: graph instance
"""
# Parse configs
@@ -283,8 +327,8 @@ class Graph:
# Parse node configurations
node_configs_map = cls._parse_node_configs(node_configs)
if root_node_id not in node_configs_map:
raise ValueError(f"Root node id {root_node_id} not found in the graph")
# Find root node
root_node_id = cls._find_root_node_id(node_configs_map, edge_configs, root_node_id)
# Build edges
edges, in_edges, out_edges = cls._build_edges(edge_configs)

View File

@@ -4,7 +4,7 @@ from collections.abc import Sequence
from dataclasses import dataclass
from typing import TYPE_CHECKING, Protocol
from dify_graph.enums import BuiltinNodeTypes, NodeExecutionType, NodeType
from dify_graph.enums import NodeExecutionType, NodeType
if TYPE_CHECKING:
from .graph import Graph
@@ -71,7 +71,7 @@ class _RootNodeValidator:
"""Validates root node invariants."""
invalid_root_code: str = "INVALID_ROOT"
container_entry_types: tuple[NodeType, ...] = (BuiltinNodeTypes.ITERATION_START, BuiltinNodeTypes.LOOP_START)
container_entry_types: tuple[NodeType, ...] = (NodeType.ITERATION_START, NodeType.LOOP_START)
def validate(self, graph: Graph) -> Sequence[GraphValidationIssue]:
root_node = graph.root_node
@@ -86,7 +86,7 @@ class _RootNodeValidator:
)
return issues
node_type = root_node.node_type
node_type = getattr(root_node, "node_type", None)
if root_node.execution_type != NodeExecutionType.ROOT and node_type not in self.container_entry_types:
issues.append(
GraphValidationIssue(
@@ -114,9 +114,45 @@ class GraphValidator:
raise GraphValidationError(issues)
@dataclass(frozen=True, slots=True)
class _TriggerStartExclusivityValidator:
"""Ensures trigger nodes do not coexist with UserInput (start) nodes."""
conflict_code: str = "TRIGGER_START_NODE_CONFLICT"
def validate(self, graph: Graph) -> Sequence[GraphValidationIssue]:
start_node_id: str | None = None
trigger_node_ids: list[str] = []
for node in graph.nodes.values():
node_type = getattr(node, "node_type", None)
if not isinstance(node_type, NodeType):
continue
if node_type == NodeType.START:
start_node_id = node.id
elif node_type.is_trigger_node:
trigger_node_ids.append(node.id)
if start_node_id and trigger_node_ids:
trigger_list = ", ".join(trigger_node_ids)
return [
GraphValidationIssue(
code=self.conflict_code,
message=(
f"UserInput (start) node '{start_node_id}' cannot coexist with trigger nodes: {trigger_list}."
),
node_id=start_node_id,
)
]
return []
_DEFAULT_RULES: tuple[GraphValidationRule, ...] = (
_EdgeEndpointValidator(),
_RootNodeValidator(),
_TriggerStartExclusivityValidator(),
)

View File

@@ -6,6 +6,5 @@ of responses based on upstream node outputs and constants.
"""
from .coordinator import ResponseStreamCoordinator
from .session import RESPONSE_SESSION_NODE_TYPES
__all__ = ["RESPONSE_SESSION_NODE_TYPES", "ResponseStreamCoordinator"]
__all__ = ["ResponseStreamCoordinator"]

View File

@@ -3,34 +3,19 @@ Internal response session management for response coordinator.
This module contains the private ResponseSession class used internally
by ResponseStreamCoordinator to manage streaming sessions.
`RESPONSE_SESSION_NODE_TYPES` is intentionally mutable so downstream applications
can opt additional response-capable node types into session creation without
patching the coordinator.
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Protocol, cast
from dify_graph.enums import BuiltinNodeTypes, NodeType
from dify_graph.nodes.answer.answer_node import AnswerNode
from dify_graph.nodes.base.template import Template
from dify_graph.nodes.end.end_node import EndNode
from dify_graph.nodes.knowledge_index import KnowledgeIndexNode
from dify_graph.runtime.graph_runtime_state import NodeProtocol
class _ResponseSessionNodeProtocol(NodeProtocol, Protocol):
"""Structural contract required from nodes that can open a response session."""
def get_streaming_template(self) -> Template: ...
RESPONSE_SESSION_NODE_TYPES: list[NodeType] = [
BuiltinNodeTypes.ANSWER,
BuiltinNodeTypes.END,
]
@dataclass
class ResponseSession:
"""
@@ -48,9 +33,10 @@ class ResponseSession:
"""
Create a ResponseSession from a response-capable node.
The parameter is typed as `NodeProtocol` because the graph is exposed behind a protocol at the runtime layer.
At runtime this must be a node whose `node_type` is listed in `RESPONSE_SESSION_NODE_TYPES`
and which implements `get_streaming_template()`.
The parameter is typed as `NodeProtocol` because the graph is exposed behind a protocol at the runtime layer,
but at runtime this must be an `AnswerNode`, `EndNode`, or `KnowledgeIndexNode` that provides:
- `id: str`
- `get_streaming_template() -> Template`
Args:
node: Node from the materialized workflow graph.
@@ -61,22 +47,11 @@ class ResponseSession:
Raises:
TypeError: If node is not a supported response node type.
"""
if node.node_type not in RESPONSE_SESSION_NODE_TYPES:
supported_node_types = ", ".join(RESPONSE_SESSION_NODE_TYPES)
raise TypeError(
"ResponseSession.from_node only supports node types in "
f"RESPONSE_SESSION_NODE_TYPES: {supported_node_types}"
)
response_node = cast(_ResponseSessionNodeProtocol, node)
try:
template = response_node.get_streaming_template()
except AttributeError as exc:
raise TypeError("ResponseSession.from_node requires get_streaming_template() on response nodes") from exc
if not isinstance(node, AnswerNode | EndNode | KnowledgeIndexNode):
raise TypeError("ResponseSession.from_node only supports AnswerNode, EndNode, or KnowledgeIndexNode")
return cls(
node_id=node.id,
template=template,
template=node.get_streaming_template(),
)
def is_complete(self) -> bool:

View File

@@ -1,9 +1,9 @@
from collections.abc import Mapping, Sequence
from collections.abc import Sequence
from datetime import datetime
from typing import Any
from pydantic import Field
from core.rag.entities.citation_metadata import RetrievalSourceMetadata
from dify_graph.entities.pause_reason import PauseReason
from dify_graph.file import File
from dify_graph.model_runtime.entities.llm_entities import LLMUsage
@@ -13,7 +13,7 @@ from .base import NodeEventBase
class RunRetrieverResourceEvent(NodeEventBase):
retriever_resources: Sequence[Mapping[str, Any]] = Field(..., description="retriever resources")
retriever_resources: Sequence[RetrievalSourceMetadata] = Field(..., description="retriever resources")
context: str = Field(..., description="context")
context_files: list[File] | None = Field(default=None, description="context files")

View File

@@ -1,3 +1,3 @@
from dify_graph.enums import BuiltinNodeTypes
from dify_graph.enums import NodeType
__all__ = ["BuiltinNodeTypes"]
__all__ = ["NodeType"]

View File

@@ -1,7 +1,7 @@
from collections.abc import Mapping, Sequence
from typing import Any
from dify_graph.enums import BuiltinNodeTypes, NodeExecutionType, WorkflowNodeExecutionStatus
from dify_graph.enums import NodeExecutionType, NodeType, WorkflowNodeExecutionStatus
from dify_graph.node_events import NodeRunResult
from dify_graph.nodes.answer.entities import AnswerNodeData
from dify_graph.nodes.base.node import Node
@@ -11,7 +11,7 @@ from dify_graph.variables import ArrayFileSegment, FileSegment, Segment
class AnswerNode(Node[AnswerNodeData]):
node_type = BuiltinNodeTypes.ANSWER
node_type = NodeType.ANSWER
execution_type = NodeExecutionType.RESPONSE
@classmethod

View File

@@ -4,7 +4,7 @@ from enum import StrEnum, auto
from pydantic import BaseModel, Field
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
from dify_graph.enums import NodeType
class AnswerNodeData(BaseNodeData):
@@ -12,7 +12,7 @@ class AnswerNodeData(BaseNodeData):
Answer Node Data.
"""
type: NodeType = BuiltinNodeTypes.ANSWER
type: NodeType = NodeType.ANSWER
answer: str = Field(..., description="answer template string")

View File

@@ -1,7 +1,9 @@
from __future__ import annotations
import importlib
import logging
import operator
import pkgutil
from abc import abstractmethod
from collections.abc import Generator, Mapping, Sequence
from functools import singledispatchmethod
@@ -159,7 +161,7 @@ class Node(Generic[NodeDataT]):
Example:
class CodeNode(Node[CodeNodeData]): # CodeNodeData is auto-extracted
node_type = BuiltinNodeTypes.CODE
node_type = NodeType.CODE
# No need to implement _get_title, _get_error_strategy, etc.
"""
super().__init_subclass__(**kwargs)
@@ -177,8 +179,7 @@ class Node(Generic[NodeDataT]):
# Skip base class itself
if cls is Node:
return
# Only register production node implementations defined under the
# canonical workflow namespaces.
# Only register production node implementations defined under dify_graph.nodes.*
# This prevents test helper subclasses from polluting the global registry and
# accidentally overriding real node types (e.g., a test Answer node).
module_name = getattr(cls, "__module__", "")
@@ -186,7 +187,7 @@ class Node(Generic[NodeDataT]):
node_type = cls.node_type
version = cls.version()
bucket = Node._registry.setdefault(node_type, {})
if module_name.startswith(("dify_graph.nodes.", "core.workflow.nodes.")):
if module_name.startswith("dify_graph.nodes."):
# Production node definitions take precedence and may override
bucket[version] = cls # type: ignore[index]
else:
@@ -202,7 +203,6 @@ class Node(Generic[NodeDataT]):
else:
latest_key = max(version_keys) if version_keys else version
bucket["latest"] = bucket[latest_key]
Node._registry_version += 1
@classmethod
def _extract_node_data_type_from_generic(cls) -> type[BaseNodeData] | None:
@@ -237,11 +237,6 @@ class Node(Generic[NodeDataT]):
# Global registry populated via __init_subclass__
_registry: ClassVar[dict[NodeType, dict[str, type[Node]]]] = {}
_registry_version: ClassVar[int] = 0
@classmethod
def get_registry_version(cls) -> int:
return cls._registry_version
def __init__(
self,
@@ -274,10 +269,6 @@ class Node(Generic[NodeDataT]):
"""Validate shared graph node payloads against the subclass-declared NodeData model."""
return cast(NodeDataT, cls._node_data_type.model_validate(node_data, from_attributes=True))
def init_node_data(self, data: BaseNodeData | Mapping[str, Any]) -> None:
"""Hydrate `_node_data` for legacy callers that bypass `__init__`."""
self._node_data = self.validate_node_data(cast(BaseNodeData, data))
def post_init(self) -> None:
"""Optional hook for subclasses requiring extra initialization."""
return
@@ -498,19 +489,29 @@ class Node(Generic[NodeDataT]):
def version(cls) -> str:
"""`node_version` returns the version of current node type."""
# NOTE(QuantumGhost): Node versions must remain unique per `NodeType` so
# registry lookups can resolve numeric versions and `latest`.
# `Node.get_node_type_classes_mapping()` can resolve numeric versions and `latest`.
raise NotImplementedError("subclasses of BaseNode must implement `version` method.")
@classmethod
def get_node_type_classes_mapping(cls) -> Mapping[NodeType, Mapping[str, type[Node]]]:
"""Return a read-only view of the currently registered node classes.
"""Return mapping of NodeType -> {version -> Node subclass} using __init_subclass__ registry.
This accessor intentionally performs no imports. The embedding layer that
owns bootstrap (for example `core.workflow.node_factory`) must import any
extension node packages before calling it so their subclasses register via
`__init_subclass__`.
Import all modules under dify_graph.nodes so subclasses register themselves on import.
Callers that rely on workflow-local nodes defined outside `dify_graph.nodes` must import
those modules before invoking this method so they can register through `__init_subclass__`.
We then return a readonly view of the registry to avoid accidental mutation.
"""
return {node_type: MappingProxyType(version_map) for node_type, version_map in cls._registry.items()}
# Import all node modules to ensure they are loaded (thus registered)
import dify_graph.nodes as _nodes_pkg
for _, _modname, _ in pkgutil.walk_packages(_nodes_pkg.__path__, _nodes_pkg.__name__ + "."):
# Avoid importing modules that depend on the registry to prevent circular imports.
if _modname == "dify_graph.nodes.node_mapping":
continue
importlib.import_module(_modname)
# Return a readonly view so callers can't mutate the registry by accident
return {nt: MappingProxyType(ver_map) for nt, ver_map in cls._registry.items()}
@property
def retry(self) -> bool:
@@ -785,16 +786,11 @@ class Node(Generic[NodeDataT]):
@_dispatch.register
def _(self, event: RunRetrieverResourceEvent) -> NodeRunRetrieverResourceEvent:
from core.rag.entities.citation_metadata import RetrievalSourceMetadata
retriever_resources = [
RetrievalSourceMetadata.model_validate(resource) for resource in event.retriever_resources
]
return NodeRunRetrieverResourceEvent(
id=self.execution_id,
node_id=self._node_id,
node_type=self.node_type,
retriever_resources=retriever_resources,
retriever_resources=event.retriever_resources,
context=event.context,
node_version=self.version(),
)

View File

@@ -4,7 +4,7 @@ from textwrap import dedent
from typing import TYPE_CHECKING, Any, Protocol, cast
from dify_graph.entities.graph_config import NodeConfigDict
from dify_graph.enums import BuiltinNodeTypes, WorkflowNodeExecutionStatus
from dify_graph.enums import NodeType, WorkflowNodeExecutionStatus
from dify_graph.node_events import NodeRunResult
from dify_graph.nodes.base.node import Node
from dify_graph.nodes.code.entities import CodeLanguage, CodeNodeData
@@ -72,7 +72,7 @@ _DEFAULT_CODE_BY_LANGUAGE: Mapping[CodeLanguage, str] = {
class CodeNode(Node[CodeNodeData]):
node_type = BuiltinNodeTypes.CODE
node_type = NodeType.CODE
_limits: CodeNodeLimits
def __init__(

View File

@@ -4,7 +4,7 @@ from typing import Annotated, Literal
from pydantic import AfterValidator, BaseModel
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
from dify_graph.enums import NodeType
from dify_graph.nodes.base.entities import VariableSelector
from dify_graph.variables.types import SegmentType
@@ -40,7 +40,7 @@ class CodeNodeData(BaseNodeData):
Code Node Data.
"""
type: NodeType = BuiltinNodeTypes.CODE
type: NodeType = NodeType.CODE
class Output(BaseModel):
type: Annotated[SegmentType, AfterValidator(_validate_type)]

View File

@@ -0,0 +1,3 @@
from .datasource_node import DatasourceNode
__all__ = ["DatasourceNode"]

View File

@@ -1,17 +1,22 @@
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any
from core.datasource.datasource_manager import DatasourceManager
from core.datasource.entities.datasource_entities import DatasourceProviderType
from core.plugin.impl.exc import PluginDaemonClientSideError
from dify_graph.entities.graph_config import NodeConfigDict
from dify_graph.entities.workflow_node_execution import WorkflowNodeExecutionStatus
from dify_graph.enums import BuiltinNodeTypes, NodeExecutionType, SystemVariableKey, WorkflowNodeExecutionMetadataKey
from dify_graph.enums import NodeExecutionType, NodeType, SystemVariableKey
from dify_graph.node_events import NodeRunResult, StreamCompletedEvent
from dify_graph.nodes.base.node import Node
from dify_graph.nodes.base.variable_template_parser import VariableTemplateParser
from dify_graph.repositories.datasource_manager_protocol import (
DatasourceManagerProtocol,
DatasourceParameter,
OnlineDriveDownloadFileParam,
)
from .entities import DatasourceNodeData, DatasourceParameter, OnlineDriveDownloadFileParam
from ...entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey
from .entities import DatasourceNodeData
from .exc import DatasourceNodeError
if TYPE_CHECKING:
@@ -24,7 +29,7 @@ class DatasourceNode(Node[DatasourceNodeData]):
Datasource Node
"""
node_type = BuiltinNodeTypes.DATASOURCE
node_type = NodeType.DATASOURCE
execution_type = NodeExecutionType.ROOT
def __init__(
@@ -33,6 +38,7 @@ class DatasourceNode(Node[DatasourceNodeData]):
config: NodeConfigDict,
graph_init_params: "GraphInitParams",
graph_runtime_state: "GraphRuntimeState",
datasource_manager: DatasourceManagerProtocol,
):
super().__init__(
id=id,
@@ -40,7 +46,7 @@ class DatasourceNode(Node[DatasourceNodeData]):
graph_init_params=graph_init_params,
graph_runtime_state=graph_runtime_state,
)
self.datasource_manager = DatasourceManager
self.datasource_manager = datasource_manager
def populate_start_event(self, event) -> None:
event.provider_id = f"{self.node_data.plugin_id}/{self.node_data.provider_name}"

View File

@@ -4,7 +4,7 @@ from pydantic import BaseModel, field_validator
from pydantic_core.core_schema import ValidationInfo
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
from dify_graph.enums import NodeType
class DatasourceEntity(BaseModel):
@@ -17,7 +17,7 @@ class DatasourceEntity(BaseModel):
class DatasourceNodeData(BaseNodeData, DatasourceEntity):
type: NodeType = BuiltinNodeTypes.DATASOURCE
type: NodeType = NodeType.DATASOURCE
class DatasourceInput(BaseModel):
# TODO: check this type
@@ -42,14 +42,3 @@ class DatasourceNodeData(BaseNodeData, DatasourceEntity):
return typ
datasource_parameters: dict[str, DatasourceInput] | None = None
class DatasourceParameter(BaseModel):
workspace_id: str
page_id: str
type: str
class OnlineDriveDownloadFileParam(BaseModel):
id: str
bucket: str

View File

@@ -2,11 +2,11 @@ from collections.abc import Sequence
from dataclasses import dataclass
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
from dify_graph.enums import NodeType
class DocumentExtractorNodeData(BaseNodeData):
type: NodeType = BuiltinNodeTypes.DOCUMENT_EXTRACTOR
type: NodeType = NodeType.DOCUMENT_EXTRACTOR
variable_selector: Sequence[str]

View File

@@ -22,7 +22,7 @@ from docx.table import Table
from docx.text.paragraph import Paragraph
from dify_graph.entities.graph_config import NodeConfigDict
from dify_graph.enums import BuiltinNodeTypes, WorkflowNodeExecutionStatus
from dify_graph.enums import NodeType, WorkflowNodeExecutionStatus
from dify_graph.file import File, FileTransferMethod, file_manager
from dify_graph.node_events import NodeRunResult
from dify_graph.nodes.base.node import Node
@@ -46,7 +46,7 @@ class DocumentExtractorNode(Node[DocumentExtractorNodeData]):
Supports plain text, PDF, and DOC/DOCX files.
"""
node_type = BuiltinNodeTypes.DOCUMENT_EXTRACTOR
node_type = NodeType.DOCUMENT_EXTRACTOR
@classmethod
def version(cls) -> str:

View File

@@ -1,4 +1,4 @@
from dify_graph.enums import BuiltinNodeTypes, NodeExecutionType, WorkflowNodeExecutionStatus
from dify_graph.enums import NodeExecutionType, NodeType, WorkflowNodeExecutionStatus
from dify_graph.node_events import NodeRunResult
from dify_graph.nodes.base.node import Node
from dify_graph.nodes.base.template import Template
@@ -6,7 +6,7 @@ from dify_graph.nodes.end.entities import EndNodeData
class EndNode(Node[EndNodeData]):
node_type = BuiltinNodeTypes.END
node_type = NodeType.END
execution_type = NodeExecutionType.RESPONSE
@classmethod

View File

@@ -1,7 +1,7 @@
from pydantic import BaseModel, Field
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
from dify_graph.enums import NodeType
from dify_graph.nodes.base.entities import OutputVariableEntity
@@ -10,7 +10,7 @@ class EndNodeData(BaseNodeData):
END Node Data.
"""
type: NodeType = BuiltinNodeTypes.END
type: NodeType = NodeType.END
outputs: list[OutputVariableEntity]

View File

@@ -9,7 +9,7 @@ import httpx
from pydantic import BaseModel, Field, ValidationInfo, field_validator
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
from dify_graph.enums import NodeType
HTTP_REQUEST_CONFIG_FILTER_KEY = "http_request_config"
@@ -90,7 +90,7 @@ class HttpRequestNodeData(BaseNodeData):
Code Node Data.
"""
type: NodeType = BuiltinNodeTypes.HTTP_REQUEST
type: NodeType = NodeType.HTTP_REQUEST
method: Literal[
"get",
"post",

View File

@@ -4,7 +4,7 @@ from collections.abc import Callable, Mapping, Sequence
from typing import TYPE_CHECKING, Any
from dify_graph.entities.graph_config import NodeConfigDict
from dify_graph.enums import BuiltinNodeTypes, WorkflowNodeExecutionStatus
from dify_graph.enums import NodeType, WorkflowNodeExecutionStatus
from dify_graph.file import File, FileTransferMethod
from dify_graph.node_events import NodeRunResult
from dify_graph.nodes.base import variable_template_parser
@@ -33,7 +33,7 @@ if TYPE_CHECKING:
class HttpRequestNode(Node[HttpRequestNodeData]):
node_type = BuiltinNodeTypes.HTTP_REQUEST
node_type = NodeType.HTTP_REQUEST
def __init__(
self,

View File

@@ -11,7 +11,7 @@ from typing import Annotated, Any, ClassVar, Literal, Self
from pydantic import BaseModel, Field, field_validator, model_validator
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
from dify_graph.enums import NodeType
from dify_graph.nodes.base.variable_template_parser import VariableTemplateParser
from dify_graph.runtime import VariablePool
from dify_graph.variables.consts import SELECTORS_LENGTH
@@ -215,7 +215,7 @@ class UserAction(BaseModel):
class HumanInputNodeData(BaseNodeData):
"""Human Input node data."""
type: NodeType = BuiltinNodeTypes.HUMAN_INPUT
type: NodeType = NodeType.HUMAN_INPUT
delivery_methods: list[DeliveryChannelConfig] = Field(default_factory=list)
form_content: str = ""
inputs: list[FormInput] = Field(default_factory=list)

View File

@@ -5,7 +5,7 @@ from typing import TYPE_CHECKING, Any
from dify_graph.entities.graph_config import NodeConfigDict
from dify_graph.entities.pause_reason import HumanInputRequired
from dify_graph.enums import BuiltinNodeTypes, NodeExecutionType, WorkflowNodeExecutionStatus
from dify_graph.enums import NodeExecutionType, NodeType, WorkflowNodeExecutionStatus
from dify_graph.node_events import (
HumanInputFormFilledEvent,
HumanInputFormTimeoutEvent,
@@ -40,7 +40,7 @@ logger = logging.getLogger(__name__)
class HumanInputNode(Node[HumanInputNodeData]):
node_type = BuiltinNodeTypes.HUMAN_INPUT
node_type = NodeType.HUMAN_INPUT
execution_type = NodeExecutionType.BRANCH
_BRANCH_SELECTION_KEYS: tuple[str, ...] = (

View File

@@ -3,7 +3,7 @@ from typing import Literal
from pydantic import BaseModel, Field
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
from dify_graph.enums import NodeType
from dify_graph.utils.condition.entities import Condition
@@ -12,7 +12,7 @@ class IfElseNodeData(BaseNodeData):
If Else Node Data.
"""
type: NodeType = BuiltinNodeTypes.IF_ELSE
type: NodeType = NodeType.IF_ELSE
class Case(BaseModel):
"""

View File

@@ -3,7 +3,7 @@ from typing import Any, Literal
from typing_extensions import deprecated
from dify_graph.enums import BuiltinNodeTypes, NodeExecutionType, WorkflowNodeExecutionStatus
from dify_graph.enums import NodeExecutionType, NodeType, WorkflowNodeExecutionStatus
from dify_graph.node_events import NodeRunResult
from dify_graph.nodes.base.node import Node
from dify_graph.nodes.if_else.entities import IfElseNodeData
@@ -13,7 +13,7 @@ from dify_graph.utils.condition.processor import ConditionProcessor
class IfElseNode(Node[IfElseNodeData]):
node_type = BuiltinNodeTypes.IF_ELSE
node_type = NodeType.IF_ELSE
execution_type = NodeExecutionType.BRANCH
@classmethod

View File

@@ -4,7 +4,7 @@ from typing import Any
from pydantic import Field
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
from dify_graph.enums import NodeType
from dify_graph.nodes.base import BaseIterationNodeData, BaseIterationState
@@ -19,7 +19,7 @@ class IterationNodeData(BaseIterationNodeData):
Iteration Node Data.
"""
type: NodeType = BuiltinNodeTypes.ITERATION
type: NodeType = NodeType.ITERATION
parent_loop_id: str | None = None # redundant field, not used currently
iterator_selector: list[str] # variable selector
output_selector: list[str] # output selector
@@ -34,7 +34,7 @@ class IterationStartNodeData(BaseNodeData):
Iteration Start Node Data.
"""
type: NodeType = BuiltinNodeTypes.ITERATION_START
type: NodeType = NodeType.ITERATION_START
class IterationState(BaseIterationState):

View File

@@ -9,8 +9,8 @@ from typing_extensions import TypeIs
from dify_graph.constants import CONVERSATION_VARIABLE_NODE_ID
from dify_graph.entities.graph_config import NodeConfigDictAdapter
from dify_graph.enums import (
BuiltinNodeTypes,
NodeExecutionType,
NodeType,
WorkflowNodeExecutionMetadataKey,
WorkflowNodeExecutionStatus,
)
@@ -62,7 +62,7 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):
Iteration Node.
"""
node_type = BuiltinNodeTypes.ITERATION
node_type = NodeType.ITERATION
execution_type = NodeExecutionType.CONTAINER
@classmethod
@@ -485,9 +485,12 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):
# variable selector to variable mapping
try:
# Get node class
from dify_graph.nodes.node_mapping import get_node_type_classes_mapping
typed_sub_node_config = NodeConfigDictAdapter.validate_python(sub_node_config)
node_type = typed_sub_node_config["data"].type
node_mapping = Node.get_node_type_classes_mapping()
node_mapping = get_node_type_classes_mapping()
if node_type not in node_mapping:
continue
node_version = str(typed_sub_node_config["data"].version)
@@ -560,7 +563,7 @@ class IterationNode(LLMUsageTrackingMixin, Node[IterationNodeData]):
raise IterationIndexNotFoundError(f"iteration {self._node_id} current index not found")
current_index = index_variable.value
for event in rst:
if isinstance(event, GraphNodeEventBase) and event.node_type == BuiltinNodeTypes.ITERATION_START:
if isinstance(event, GraphNodeEventBase) and event.node_type == NodeType.ITERATION_START:
continue
if isinstance(event, GraphNodeEventBase):

View File

@@ -1,4 +1,4 @@
from dify_graph.enums import BuiltinNodeTypes, WorkflowNodeExecutionStatus
from dify_graph.enums import NodeType, WorkflowNodeExecutionStatus
from dify_graph.node_events import NodeRunResult
from dify_graph.nodes.base.node import Node
from dify_graph.nodes.iteration.entities import IterationStartNodeData
@@ -9,7 +9,7 @@ class IterationStartNode(Node[IterationStartNodeData]):
Iteration Start Node.
"""
node_type = BuiltinNodeTypes.ITERATION_START
node_type = NodeType.ITERATION_START
@classmethod
def version(cls) -> str:

Some files were not shown because too many files have changed in this diff Show More